/*
 * AArch64 translation
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"

#include "exec/semihost.h"
#include "exec/gen-icount.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"

#include "trace-tcg.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;
static TCGv_i64 cpu_reg(DisasContext *s, int reg);
static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
/* Note that the gvec expanders operate on offsets + sizes.  */
typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
/* initialize TCG globals.  */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
static inline int get_a64_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A64 "unprivileged load/store" insns:
     *  if EL1, access as if EL0; otherwise access at current EL
     */
    ARMMMUIdx useridx;

    switch (s->mmu_idx) {
    case ARMMMUIdx_S12NSE1:
        useridx = ARMMMUIdx_S12NSE0;
        break;
    case ARMMMUIdx_S1SE1:
        useridx = ARMMMUIdx_S1SE0;
        break;
    case ARMMMUIdx_S2NS:
        g_assert_not_reached();
    default:
        useridx = s->mmu_idx;
        break;
    }
    return arm_to_core_mmu_idx(useridx);
}
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;
    int el = arm_current_el(env);
    const char *ns_status;

    cpu_fprintf(f, "PC=%016" PRIx64 " SP=%016" PRIx64 "\n",
                env->pc, env->xregs[31]);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "X%02d=%016" PRIx64, i, env->xregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }

    cpu_fprintf(f, "\nPSTATE=%08x %c%c%c%c %sEL%d%c\n",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-',
                ns_status,
                el,
                psr & PSTATE_SP ? 'h' : 't');

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 32;
        for (i = 0; i < numvfpregs; i++) {
            uint64_t *q = aa64_vfp_qreg(env, i);
            uint64_t vlo = q[0];
            uint64_t vhi = q[1];
            cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "%c",
                        i, vhi, vlo, (i & 1 ? '\n' : ' '));
        }
        cpu_fprintf(f, "FPCR: %08x FPSR: %08x\n",
                    vfp_get_fpcr(env), vfp_get_fpsr(env));
    }
}
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
/* Load the PC from a generic TCG variable.
 *
 * If address tagging is enabled via the TCR TBI bits, then loading
 * an address into the PC will clear out any tag in it:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * We can avoid doing this for relative-branches, because the
 * PC + offset can never overflow into the tag bits (assuming
 * that virtual addresses are less than 56 bits wide, as they
 * are currently), but we must handle it for branch-to-register.
 */
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    if (s->current_el <= 1) {
        /* Test if NEITHER or BOTH TBI values are set.  If so, no need to
         * examine bit 55 of address, can just generate code.
         * If mixed, then test via generated code
         */
        if (s->tbi0 && s->tbi1) {
            TCGv_i64 tmp_reg = tcg_temp_new_i64();
            /* Both bits set, sign extension from bit 55 into [63:56] will
             * cover both cases
             */
            tcg_gen_shli_i64(tmp_reg, src, 8);
            tcg_gen_sari_i64(cpu_pc, tmp_reg, 8);
            tcg_temp_free_i64(tmp_reg);
        } else if (!s->tbi0 && !s->tbi1) {
            /* Neither bit set, just load it as-is */
            tcg_gen_mov_i64(cpu_pc, src);
        } else {
            TCGv_i64 tcg_tmpval = tcg_temp_new_i64();
            TCGv_i64 tcg_bit55 = tcg_temp_new_i64();
            TCGv_i64 tcg_zero = tcg_const_i64(0);

            tcg_gen_andi_i64(tcg_bit55, src, (1ull << 55));

            if (s->tbi0) {
                /* tbi0==1, tbi1==0, so 0-fill upper byte if bit 55 = 0 */
                tcg_gen_andi_i64(tcg_tmpval, src,
                                 0x00FFFFFFFFFFFFFFull);
                tcg_gen_movcond_i64(TCG_COND_EQ, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            } else {
                /* tbi0==0, tbi1==1, so 1-fill upper byte if bit 55 = 1 */
                tcg_gen_ori_i64(tcg_tmpval, src,
                                0xFF00000000000000ull);
                tcg_gen_movcond_i64(TCG_COND_NE, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            }

            tcg_temp_free_i64(tcg_zero);
            tcg_temp_free_i64(tcg_bit55);
            tcg_temp_free_i64(tcg_tmpval);
        }
    } else {  /* EL > 1 */
        if (s->tbi0) {
            /* Force tag byte to all zero */
            tcg_gen_andi_i64(cpu_pc, src, 0x00FFFFFFFFFFFFFFull);
        } else {
            /* Load unmodified address */
            tcg_gen_mov_i64(cpu_pc, src);
        }
    }
}
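
/* Illustrative sketch (our annotation, not translator code): the same
 * bit-55 tag handling computed on plain host integers.  The helper name
 * and standalone form are ours, purely for exposition; the translator
 * above emits TCG ops performing the equivalent arithmetic at run time.
 */
static inline uint64_t a64_tbi_example(uint64_t addr, bool tbi0, bool tbi1)
{
    bool bit55 = (addr >> 55) & 1;

    if (bit55 ? tbi1 : tbi0) {
        /* The controlling TBI bit is set: sign-extend from bit 55, which
         * zero-fills the tag byte when bit 55 is 0 and one-fills it when
         * bit 55 is 1 -- exactly the shli/sari pair used above.
         */
        return (uint64_t)((int64_t)(addr << 8) >> 8);
    }
    return addr; /* tag bits are significant: load unmodified */
}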
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /* Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly. The NE/EQ comparisons are also fine with this choice.  */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);

    arm_free_cc(&c32);
}

static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);
    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               uint32_t syndrome, uint32_t target_el)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp, syndrome, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, int offset,
                                    uint32_t syndrome)
{
    TCGv_i32 tcg_syn;

    gen_a64_set_pc_im(s->pc - offset);
    tcg_syn = tcg_const_i32(syndrome);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}
static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep (either QEMU's or the ARM
     * debug architecture kind) or deterministic io
     */
    if (s->base.singlestep_enabled || s->ss_active ||
        (tb_cflags(s->base.tb) & CF_LAST_IO)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Only link tbs from inside the same guest page */
    if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }
#endif

    return true;
}
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->base.tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb((intptr_t)tb + n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else if (s->base.singlestep_enabled) {
            gen_exception_internal(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}
static void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

#define unsupported_encoding(s, insn)                                    \
    do {                                                                 \
        qemu_log_mask(LOG_UNIMP,                                         \
                      "%s:%d: unsupported instruction encoding 0x%08x "  \
                      "at pc=%016" PRIx64 "\n",                          \
                      __FILE__, __LINE__, insn, s->pc - 4);              \
        unallocated_encoding(s);                                         \
    } while (0)
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

static TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
/*
 * Register access functions
 *
 * These functions are used for directly accessing a register in cases
 * where changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
static TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}
/* We should have at some point before trying to access an FP register
 * done the necessary access check, so assert that
 * (a) we did the check and
 * (b) we didn't then just plough ahead anyway if it failed.
 * Print the instruction pattern in the abort message so we can figure
 * out what we need to fix if a user encounters this problem in the wild.
 */
static inline void assert_fp_access_checked(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
        fprintf(stderr, "target-arm: FP access check missing for "
                "instruction 0x%08x\n", s->insn);
        abort();
    }
#endif
}
/* Return the offset into CPUARMState of an element of specified
 * size, 'element' places in from the least significant end of
 * the FP/vector register Qn.
 */
static inline int vec_reg_offset(DisasContext *s, int regno,
                                 int element, TCGMemOp size)
{
    int offs = 0;
#ifdef HOST_WORDS_BIGENDIAN
    /* This is complicated slightly because vfp.zregs[n].d[0] is
     * still the low half and vfp.zregs[n].d[1] the high half
     * of the 128 bit vector, even on big endian systems.
     * Calculate the offset assuming a fully bigendian 128 bits,
     * then XOR to account for the order of the two 64 bit halves.
     */
    offs += (16 - ((element + 1) * (1 << size)));
    offs ^= 8;
#else
    offs += element * (1 << size);
#endif
    offs += offsetof(CPUARMState, vfp.zregs[regno]);
    assert_fp_access_checked(s);
    return offs;
}
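
/* Worked example (our annotation): take a 32-bit element (1 << size == 4)
 * at element index 1, i.e. architectural bytes 4..7 of Qn.  On a big-endian
 * host the "fully bigendian" offset is 16 - ((1 + 1) * 4) = 8, and the XOR
 * swaps the two doublewords: 8 ^ 8 = 0.  Host bytes 0..3 of zregs[n] hold
 * the most significant bytes of d[0] on such a host, which are exactly
 * architectural bytes 7..4, as required.
 */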
/* Return the offset into CPUARMState of the "whole" vector register Qn.  */
static inline int vec_full_reg_offset(DisasContext *s, int regno)
{
    assert_fp_access_checked(s);
    return offsetof(CPUARMState, vfp.zregs[regno]);
}
/* Return a newly allocated pointer to the vector register.  */
static TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vec_full_reg_offset(s, regno));
    return ret;
}
/* Return the byte size of the "whole" vector register, VL / 8.  */
static inline int vec_full_reg_size(DisasContext *s)
{
    /* FIXME SVE: We should put the composite ZCR_EL* value into tb->flags.
       In the meantime this is just the AdvSIMD length of 128.  */
    return 128 / 8;
}
/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}
/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}
/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    if (!is_q) {
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        tcg_gen_st_i64(tcg_zero, cpu_env, ofs + 8);
        tcg_temp_free_i64(tcg_zero);
    }
    if (vsz > 16) {
        tcg_gen_gvec_dup8i(ofs + 16, vsz - 16, vsz - 16, 0);
    }
}
static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}
static TCGv_ptr get_fpstatus_ptr(bool is_f16)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    /* In A64 all instructions (both FP and Neon) use the FPCR; there
     * is no equivalent of the A32 Neon "standard FPSCR value".
     * However half-precision operations operate under a different
     * FZ16 flag and use vfp.fp_status_f16 instead of vfp.fp_status.
     */
    if (is_f16) {
        offset = offsetof(CPUARMState, vfp.fp_status_f16);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
/* Expand a 2-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an op descriptor.
 */
static void gen_gvec_op2i(DisasContext *s, bool is_q, int rd,
                          int rn, int64_t imm, const GVecGen2i *gvec_op)
{
    tcg_gen_gvec_2i(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                    is_q ? 16 : 8, vec_full_reg_size(s), imm, gvec_op);
}

/* Expand a 3-operand AdvSIMD vector operation using an op descriptor.  */
static void gen_gvec_op3(DisasContext *s, bool is_q, int rd,
                         int rn, int rm, const GVecGen3 *gvec_op)
{
    tcg_gen_gvec_3(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                   vec_full_reg_offset(s, rm), is_q ? 16 : 8,
                   vec_full_reg_size(s), gvec_op);
}
/* Expand a 3-operand + env pointer operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_env(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, gen_helper_gvec_3_ptr *fn)
{
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), cpu_env,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
}

/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = get_fpstatus_ptr(is_fp16);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}

/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
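
/* Illustrative sketch (our annotation, not translator code): the V-flag
 * derivation above on plain host integers.  Signed overflow on addition
 * occurs when both operands have the same sign but the result's sign
 * differs, i.e. (res ^ a) & ~(a ^ b) has its top bit set -- exactly the
 * xor/xor/andc sequence emitted above.  The helper name is ours.
 */
static inline bool add_overflows_example(uint64_t a, uint64_t b)
{
    uint64_t res = a + b;
    return (((res ^ a) & ~(a ^ b)) >> 63) & 1;
}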
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}
/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, int size, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s,
                             TCGv_i64 dest, TCGv_i64 tcg_addr,
                             int size, bool is_signed,
                             bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    TCGMemOp memop = s->be_data + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      is_signed,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
static void do_gpr_ld(DisasContext *s,
                      TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
                     get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
                            s->be_data + size);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}
/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = s->be_data + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);

    clear_vec_high(s, true, destidx);
}
/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */
/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);

    tcg_temp_free_i64(tcg_tmp);
}
/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}
/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static inline bool fp_access_check(DisasContext *s)
{
    assert(!s->fp_access_checked);
    s->fp_access_checked = true;

    if (!s->fp_excp_el) {
        return true;
    }

    gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
                       s->fp_excp_el);
    return false;
}
/* Check that SVE access is enabled.  If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 */
static inline bool sve_access_check(DisasContext *s)
{
    if (s->sve_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
                           s->sve_excp_el);
        return false;
    }
    return true;
}
/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}
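
/* Illustrative sketch (our annotation, not translator code): the
 * DecodeRegExtend semantics above on a plain host integer.  'option'
 * packs the sign bit and extend width just as decoded above; the
 * helper name and standalone form are ours, for exposition only.
 */
static inline uint64_t ext_and_shift_example(uint64_t in, int option,
                                             unsigned int shift)
{
    int extsize = option & 3;           /* 0:B 1:H 2:W 3:X */
    bool is_signed = (option >> 2) & 1; /* SXT* vs UXT* */
    int bits = 8 << extsize;
    uint64_t out;

    if (extsize == 3) {
        out = in;                       /* 64-bit: no extension needed */
    } else if (is_signed) {
        /* sign-extend the low 'bits' bits */
        out = (uint64_t)(((int64_t)(in << (64 - bits))) >> (64 - bits));
    } else {
        out = in & ((1ull << bits) - 1);/* zero-extend */
    }
    return out << shift;                /* optional left shift */
}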
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}
/*
 * This provides a simple table based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}
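
/* Usage sketch (our annotation; the table entries and handler names here
 * are made up for illustration, not real entries from this file).  Tables
 * pair a pattern/mask with a handler and end with a zero mask:
 *
 *     static const AArch64DecodeTable example_table[] = {
 *         { 0x0e200400, 0x9f200400, disas_example_three_reg },
 *         { 0x0e200000, 0x9f200c00, disas_example_diff },
 *         { 0x00000000, 0x00000000, NULL }
 *     };
 *
 *     AArch64DecodeFn *fn = lookup_disas_fn(&example_table[0], insn);
 *     if (fn) {
 *         fn(s, insn);
 *     } else {
 *         unallocated_encoding(s);
 *     }
 */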
/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */
/* Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1U << 31)) {
        /* BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    /* B Branch / BL Branch with link */
    gen_goto_tb(s, 0, addr);
}
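
/* Worked example (our annotation): sextract32(insn, 0, 26) extracts imm26
 * as a signed value, so for a B with imm26 == 0x3ffffff (i.e. -1) the
 * target is s->pc + (-1) * 4 - 4.  Since s->pc already points past the
 * current instruction, the trailing "- 4" recovers the instruction's own
 * address, giving a branch to the instruction 4 bytes before the branch.
 */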
/* Compare and branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* Test and branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}
/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 3: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        return;
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* YIELD */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        return;
    case 2: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        return;
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        gen_goto_tb(s, 0, s->pc);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;
    switch (op) {
    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x1e: /* DAIFSet */
    case 0x1f: /* DAIFClear */
    {
        TCGv_i32 tcg_imm = tcg_const_i32(crm);
        TCGv_i32 tcg_op = tcg_const_i32(op);
        gen_a64_set_pc_im(s->pc - 4);
        gen_helper_msr_i_pstate(cpu_env, tcg_op, tcg_imm);
        tcg_temp_free_i32(tcg_imm);
        tcg_temp_free_i32(tcg_op);
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs.  */
        gen_a64_set_pc_im(s->pc);
        s->base.is_jmp = (op == 0x1f ? DISAS_EXIT : DISAS_JUMP);
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }
}
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}
static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
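
/* Illustrative sketch (our annotation, not translator code): the same NZCV
 * packing on plain integers.  QEMU keeps N in bit 31 of cpu_NF, Z as
 * "cpu_ZF == 0", C in bit 0 of cpu_CF and V in bit 31 of cpu_VF, so
 * building the architectural NZCV word moves those into bits 31..28.
 * The helper name is ours.
 */
static inline uint32_t pack_nzcv_example(uint32_t nf, uint32_t zf,
                                         uint32_t cf, uint32_t vf)
{
    uint32_t nzcv = 0;
    nzcv |= nf & (1u << 31);            /* N: sign bit of NF       */
    nzcv |= (uint32_t)(zf == 0) << 30;  /* Z: set when ZF is zero  */
    nzcv |= (cf & 1) << 29;             /* C: bit 0 of CF          */
    nzcv |= (vf >> 31) << 28;           /* V: sign bit of VF       */
    return nzcv;
}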
/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        TCGv_ptr tmpptr;
        TCGv_i32 tcg_syn, tcg_isread;
        uint32_t syndrome;

        gen_a64_set_pc_im(s->pc - 4);
        tmpptr = tcg_const_ptr(ri);
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        tcg_syn = tcg_const_i32(syndrome);
        tcg_isread = tcg_const_i32(isread);
        gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
        tcg_temp_free_ptr(tmpptr);
        tcg_temp_free_i32(tcg_syn);
        tcg_temp_free_i32(tcg_isread);
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        tcg_rt = cpu_reg(s, rt);
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    default:
        break;
    }
    if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    }
    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
        return;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        gen_io_end();
        s->base.is_jmp = DISAS_UPDATE;
    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /* We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->base.is_jmp = DISAS_UPDATE;
    }
}
/* System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}
/* Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------------+------------------------+----------+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);
    TCGv_i32 tmp;

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception. This is architecturally
         * mandated, to ensure that single-stepping a system call
         * instruction works properly.
         */
        switch (op2_ll) {
        case 1:                                                     /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
                               default_exception_el(s));
            break;
        case 2:                                                     /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_set_pc_im(s->pc - 4);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
            break;
        case 3:                                                     /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_set_pc_im(s->pc - 4);
            tmp = tcg_const_i32(syn_aa64_smc(imm16));
            gen_helper_pre_smc(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT. This has two purposes.
         * Architecturally, it is an external halting debug instruction.
         * Since QEMU doesn't implement external debug, we treat this as
         * required for when halting debug is disabled: it will UNDEF.
         * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
         */
        if (semihosting_enabled() && imm16 == 0xf000) {
#ifndef CONFIG_USER_ONLY
            /* In system mode, don't allow userspace access to semihosting,
             * to provide some semblance of security (and for consistency
             * with our 32-bit semihosting).
             */
            if (s->current_el == 0) {
                unsupported_encoding(s, insn);
                break;
            }
#endif
            gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        } else {
            unsupported_encoding(s, insn);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* BR */
    case 1: /* BLR */
    case 2: /* RET */
        gen_a64_set_pc(s, cpu_reg(s, rn));
        /* BLR also needs to load return address */
        if (opc == 1) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
        }
        break;
    case 4: /* ERET */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_exception_return(cpu_env);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_end();
        }
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
        return;
    case 5: /* DRPS */
        if (rn != 0x1f) {
            unallocated_encoding(s);
        } else {
            unsupported_encoding(s, insn);
        }
        return;
    default:
        unallocated_encoding(s);
        return;
    }

    s->base.is_jmp = DISAS_JUMP;
}
/* Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            disas_system(s, insn);
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/*
 * Load/Store exclusive instructions are implemented by remembering
 * the value/address loaded, and seeing if these are the same
 * when the store is performed. This is not actually the architecturally
 * mandated semantics, but it works for typical guest code sequences
 * and avoids having to monitor regular stores.
 *
 * The store exclusive uses the atomic cmpxchg primitives to avoid
 * races in multi-threaded linux-user and when MTTCG softmmu is
 * enabled.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    int idx = get_mem_index(s);
    TCGMemOp memop = s->be_data;

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            /* The pair must be single-copy atomic for the doubleword.  */
            memop |= MO_64 | MO_ALIGN;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            /* The pair must be single-copy atomic for *each* doubleword, not
               the entire quadword, however it must be quadword aligned.  */
            memop |= MO_64;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
                                memop | MO_ALIGN_16);

            TCGv_i64 addr2 = tcg_temp_new_i64();
            tcg_gen_addi_i64(addr2, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
            tcg_temp_free_i64(addr2);

            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        memop |= size | MO_ALIGN;
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (s->be_data == MO_LE) {
            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                               cpu_reg(s, rt), cpu_reg(s, rt2));
            }
        } else {
            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                               cpu_reg(s, rt), cpu_reg(s, rt2));
            }
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
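
/* Illustrative sketch (our annotation, not translator code): the monitor
 * protocol above expressed with a host compare-and-swap on plain integers.
 * The state variables stand in for env->exclusive_addr/exclusive_val;
 * names and the standalone form are ours, for exposition only.
 */
static inline int store_exclusive_example(uint64_t *mem, uint64_t addr_tag,
                                          uint64_t *excl_addr,
                                          uint64_t *excl_val, uint64_t newval)
{
    int status = 1;                     /* 1 = fail, like Rd above */

    if (addr_tag == *excl_addr) {
        uint64_t expected = *excl_val;
        /* Succeeds only if memory still holds the exclusively-loaded
         * value, mirroring tcg_gen_atomic_cmpxchg_i64 + setcond above.
         */
        if (__atomic_compare_exchange_n(mem, &expected, newval, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
            status = 0;
        }
    }
    *excl_addr = (uint64_t)-1;          /* always clear the monitor */
    return status;
}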
/* Update the Sixty-Four bit (SF) registersize. This logic is derived
 * from the ARMv8 specs for LDR (Shared decode for all encodings).
 */
static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    int opc0 = extract32(opc, 0, 1);
    int regsize;

    if (is_signed) {
        regsize = opc0 ? 32 : 64;
    } else {
        regsize = size == 3 ? 64 : 32;
    }
    return regsize == 64;
}
/* Load/store exclusive
 *
 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  | Rt   |
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 *
 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   L: 0 -> store, 1 -> load
 *  o2: 0 -> exclusive, 1 -> not
 *  o1: 0 -> single register, 1 -> register pair
 *  o0: 1 -> load-acquire/store-release, 0 -> not
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int is_lasr = extract32(insn, 15, 1);
    int rs = extract32(insn, 16, 5);
    int is_pair = extract32(insn, 21, 1);
    int is_store = !extract32(insn, 22, 1);
    int is_excl = !extract32(insn, 23, 1);
    int size = extract32(insn, 30, 2);
    TCGv_i64 tcg_addr;

    if ((!is_excl && !is_pair && !is_lasr) ||
        (!is_excl && is_pair) ||
        (is_pair && size < 2)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    /* Note that since TCG is single threaded load-acquire/store-release
     * semantics require no extra if (is_lasr) { ... } handling.
     */
    if (is_excl) {
        if (!is_store) {
            s->is_ldex = true;
            gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
        } else {
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, false, 0);

        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (is_store) {
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      true, rt, iss_sf, is_lasr);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false,
                      true, rt, iss_sf, is_lasr);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
        }
    }
}
/*
 * Load register (literal)
 *
 *  31 30 29   27  26 25 24 23                5 4     0
 * +-----+-------+---+-----+-------------------+-------+
 * | opc | 0 1 1 | V | 0 0 |       imm19       |  Rt   |
 * +-----+-------+---+-----+-------------------+-------+
 *
 * V: 1 -> vector (simd/fp)
 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
 *                   10-> 32 bit signed, 11 -> prefetch
 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, tcg_addr;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
    }

    tcg_rt = cpu_reg(s, rt);

    tcg_addr = tcg_const_i64((s->pc - 4) + imm);
    if (is_vector) {
        do_fp_ld(s, rt, tcg_addr, size);
    } else {
        /* Only unsigned 32bit loads target 32bit registers.  */
        bool iss_sf = opc != 0;

        do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
                  true, rt, iss_sf, false);
    }
    tcg_temp_free_i64(tcg_addr);
}
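/* Note: s->pc has already been advanced past this instruction, so the
 * literal address is (s->pc - 4) + imm, i.e. the address of the LDR
 * itself plus the sign-extended, word-scaled imm19 field.  For example,
 * an LDR (literal) at 0x1000 with imm19 = 2 loads from 0x1008.
 */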
/*
 * LDNP (Load Pair - non-temporal hint)
 * LDP (Load Pair - non vector)
 * LDPSW (Load Pair Signed Word - non vector)
 * STNP (Store Pair - non-temporal hint)
 * STP (Store Pair - non vector)
 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
 * LDP (Load Pair of SIMD&FP)
 * STNP (Store Pair of SIMD&FP - non-temporal hint)
 * STP (Store Pair of SIMD&FP)
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+-----------------------------+
 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  | Rt   |
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW                    01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index
 *   L: 0 -> Store 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be stored
 * Rn = general purpose register containing address
 * imm7 = signed offset (multiple of 4 or 8 depending on size)
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;

    TCGv_i64 tcg_addr; /* calculated address */
    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    offset <<= size;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!postindex) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
    }

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, tcg_addr, size);
        } else {
            do_fp_st(s, rt, tcg_addr, size);
        }
        tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
        if (is_load) {
            do_fp_ld(s, rt2, tcg_addr, size);
        } else {
            do_fp_st(s, rt2, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);

        if (is_load) {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* Do not modify tcg_rt before recognizing any exception
             * from the second load.
             */
            do_gpr_ld(s, tmp, tcg_addr, size, is_signed, false,
                      false, 0, false, false);
            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
            do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
                      false, 0, false, false);

            tcg_gen_mov_i64(tcg_rt, tmp);
            tcg_temp_free_i64(tmp);
        } else {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      false, 0, false, false);
            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
            do_gpr_st(s, tcg_rt2, tcg_addr, size,
                      false, 0, false, false);
        }
    }

    if (wback) {
        if (postindex) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
        } else {
            tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
    }
}
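/* Note on the writeback arithmetic above: after the second access
 * tcg_addr points one element past the first element's address, so the
 * final adjustment recovers the architecturally required value:
 *   post-index: base + (1 << size) + (offset - (1 << size)) = base + offset
 *   pre-index:  base + offset + (1 << size) - (1 << size)   = base + offset
 */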
/*
 * Load/store (immediate post-indexed)
 * Load/store (immediate pre-indexed)
 * Load/store (unscaled immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 *
 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
         10 -> unprivileged
 * V = 0 -> non-vector
 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
                                int opc,
                                int size,
                                int rt,
                                bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_unpriv = (idx == 2);
    bool iss_valid = !is_vector;
    bool post_index;
    bool writeback;

    TCGv_i64 tcg_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4 || is_unpriv) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            if (is_unpriv) {
                unallocated_encoding(s);
            }
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    switch (idx) {
    case 0:
    case 2:
        post_index = false;
        writeback = false;
        break;
    case 1:
        post_index = true;
        writeback = true;
        break;
    case 3:
        post_index = false;
        writeback = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!post_index) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
    }

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
                             iss_valid, rt, iss_sf, false);
        } else {
            do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
                             is_signed, is_extended, memidx,
                             iss_valid, rt, iss_sf, false);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, tcg_addr);
    }
}
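/* Note: idx == 2 selects the unprivileged variant (LDTR/STTR and
 * friends); it performs the access with the unprivileged MMU index from
 * get_a64_user_mem_index() and, per the switch above, never writes back.
 */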
/*
 * Load/store (register offset)
 *
 * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * V: 1 -> vector/simd
 * opt: extend encoding (see DecodeRegExtend)
 * S: if S=1 then scale (essentially index by sizeof(size))
 * Rt: register to transfer into/out of
 * Rn: address register or SP for base
 * Rm: offset register or ZR for offset
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
                                   int opc,
                                   int size,
                                   int rt,
                                   bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 13, 3);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;

    TCGv_i64 tcg_rm;
    TCGv_i64 tcg_addr;

    if (extract32(opt, 1, 1) == 0) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size,
                      is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}
/*
 * Load/store (unsigned immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21        10 9     5
 * +----+-------+---+-----+-----+------------+-------+------+
 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
 * +----+-------+---+-----+-----+------------+-------+------+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * Rn: base address register (inc SP)
 * Rt: target register
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
                                        int opc,
                                        int size,
                                        int rt,
                                        bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    unsigned int offset;

    TCGv_i64 tcg_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);
    offset = imm12 << size;
    tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}
/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int opc = extract32(insn, 22, 2);
    bool is_vector = extract32(insn, 26, 1);
    int size = extract32(insn, 30, 2);

    switch (extract32(insn, 24, 2)) {
    case 0:
        if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
            disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
        } else {
            /* Load/store register (unscaled immediate)
             * Load/store immediate pre/post-indexed
             * Load/store register unprivileged
             */
            disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
        }
        break;
    case 1:
        disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* AdvSIMD load/store multiple structures
 *
 *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+-------------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+-------------+--------+------+------+------+
 *
 * AdvSIMD load/store multiple structures (post-indexed)
 *
 *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 tcg_addr, tcg_rn;

    int ebytes = 1 << size;
    int elements = (is_q ? 128 : 64) / (8 << size);
    int rpt;    /* num iterations */
    int selem;  /* structure elements */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* From the shared decode logic */
    switch (opcode) {
    case 0x0:
        rpt = 1;
        selem = 4;
        break;
    case 0x2:
        rpt = 4;
        selem = 1;
        break;
    case 0x4:
        rpt = 1;
        selem = 3;
        break;
    case 0x6:
        rpt = 3;
        selem = 1;
        break;
    case 0x7:
        rpt = 1;
        selem = 1;
        break;
    case 0x8:
        rpt = 1;
        selem = 2;
        break;
    case 0xa:
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);

    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int tt = (rt + r) % 32;
            int xs;
            for (xs = 0; xs < selem; xs++) {
                if (is_store) {
                    do_vec_st(s, tt, e, tcg_addr, size);
                } else {
                    do_vec_ld(s, tt, e, tcg_addr, size);

                    /* For non-quad operations, setting a slice of the low
                     * 64 bits of the register clears the high 64 bits (in
                     * the ARM ARM pseudocode this is implicit in the fact
                     * that 'rval' is a 64 bit wide variable).
                     * For quad operations, we might still need to zero the
                     * high bits of SVE.  We optimize by noticing that we only
                     * need to do this the first time we touch a register.
                     */
                    if (e == 0 && (r == 0 || xs == selem - 1)) {
                        clear_vec_high(s, is_q, tt);
                    }
                }
                tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
                tt = (tt + 1) % 32;
            }
        }
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_addr);
}
/* AdvSIMD load/store single structure
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * AdvSIMD load/store single structure (post-indexed)
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 1 | L R |     Rm    | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 * index = encoded in Q:S:size dependent on size
 *
 * lane_size = encoded in R, opc
 * transfer width = encoded in opc, S, size
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    int index = is_q << 3 | S << 2 | size;
    int ebytes, xs;
    TCGv_i64 tcg_addr, tcg_rn;

    switch (scale) {
    case 3:
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        break;
    case 1:
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            index >>= 2;
        } else {
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    ebytes = 1 << scale;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);

    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements */
            uint64_t mulconst;
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
                                get_mem_index(s), s->be_data + scale);
            switch (scale) {
            case 0:
                mulconst = 0x0101010101010101ULL;
                break;
            case 1:
                mulconst = 0x0001000100010001ULL;
                break;
            case 2:
                mulconst = 0x0000000100000001ULL;
                break;
            case 3:
                mulconst = 0; /* unused */
                break;
            default:
                g_assert_not_reached();
            }
            if (mulconst) {
                tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
            }
            write_vec_element(s, tcg_tmp, rt, 0, MO_64);
            if (is_q) {
                write_vec_element(s, tcg_tmp, rt, 1, MO_64);
            }
            tcg_temp_free_i64(tcg_tmp);
            clear_vec_high(s, is_q, rt);
        } else {
            /* Load/store one element per register */
            if (is_load) {
                do_vec_ld(s, rt, index, tcg_addr, scale);
            } else {
                do_vec_st(s, rt, index, tcg_addr, scale);
            }
        }
        tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
        rt = (rt + 1) % 32;
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_addr);
}
/* Loads and stores */
static void disas_ldst(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 6)) {
    case 0x08: /* Load/store exclusive */
        disas_ldst_excl(s, insn);
        break;
    case 0x18: case 0x1c: /* Load register (literal) */
        disas_ld_lit(s, insn);
        break;
    case 0x28: case 0x29:
    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
        disas_ldst_pair(s, insn);
        break;
    case 0x38: case 0x39:
    case 0x3c: case 0x3d: /* Load/store register (all forms) */
        disas_ldst_reg(s, insn);
        break;
    case 0x0c: /* AdvSIMD load/store multiple structures */
        disas_ldst_multiple_struct(s, insn);
        break;
    case 0x0d: /* AdvSIMD load/store single structure */
        disas_ldst_single_struct(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* PC-rel. addressing
 *   31  30   29 28       24 23                5 4    0
 * +----+-------+-----------+-------------------+------+
 * | op | immlo | 1 0 0 0 0 |       immhi       |  Rd  |
 * +----+-------+-----------+-------------------+------+
 */
static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
{
    unsigned int page, rd;
    uint64_t base;
    uint64_t offset;

    page = extract32(insn, 31, 1);
    /* SignExtend(immhi:immlo) -> offset */
    offset = sextract64(insn, 5, 19);
    offset = offset << 2 | extract32(insn, 29, 2);
    rd = extract32(insn, 0, 5);
    base = s->pc - 4;

    if (page) {
        /* ADRP (page based) */
        base &= ~0xfff;
        offset <<= 12;
    }

    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
}
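/* Worked example: for ADRP, base is the instruction's own address with
 * its low 12 bits cleared and the immediate is shifted up by 12, so an
 * ADRP at 0x1234 with offset 1 computes 0x1000 + 0x1000 = 0x2000.
 */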
/*
 * Add/subtract (immediate)
 *
 *  31 30 29 28       24 23 22 21         10 9   5 4   0
 * +--+--+--+-----------+-----+-------------+-----+-----+
 * |sf|op| S| 1 0 0 0 1 |shift|    imm12    |  Rn | Rd  |
 * +--+--+--+-----------+-----+-------------+-----+-----+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
 */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t imm = extract32(insn, 10, 12);
    int shift = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool is_64bit = extract32(insn, 31, 1);

    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
    TCGv_i64 tcg_result;

    switch (shift) {
    case 0x0:
        break;
    case 0x1:
        imm <<= 12;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_result = tcg_temp_new_i64();
    if (!setflags) {
        if (sub_op) {
            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
        } else {
            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
        }
    } else {
        TCGv_i64 tcg_imm = tcg_const_i64(imm);
        if (sub_op) {
            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        } else {
            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        }
        tcg_temp_free_i64(tcg_imm);
    }

    if (is_64bit) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
/* The input should be a value in the bottom e bits (with higher
 * bits zero); returns that value replicated into every element
 * of size e in a 64 bit integer.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    while (e < 64) {
        mask |= mask << e;
        e *= 2;
    }
    return mask;
}

/* Return a value with the bottom len bits set (where 0 < len <= 64) */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    return ~0ULL >> (64 - length);
}

/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
 * only require the wmask. Returns false if the imms/immr/immn are a reserved
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                                   unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each. Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 0x11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones. */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = bitmask64(s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= bitmask64(e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
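/* Worked example: immn = 0, imms = 0b000011, immr = 0 gives
 * len = 31 - clz32(0b111100) = 5, so e = 32, s = 3, r = 0:
 * one element is bitmask64(4) = 0xf, and replication yields
 * 0x0000000f0000000f (0xf in each 32-bit element).
 */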
/* Logical (immediate)
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_logic_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opc, is_n, immr, imms, rn, rd;
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t wmask;
    bool is_and = false;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    is_n = extract32(insn, 22, 1);
    immr = extract32(insn, 16, 6);
    imms = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && is_n) {
        unallocated_encoding(s);
        return;
    }

    if (opc == 0x3) { /* ANDS */
        tcg_rd = cpu_reg(s, rd);
    } else {
        tcg_rd = cpu_reg_sp(s, rd);
    }
    tcg_rn = cpu_reg(s, rn);

    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
        /* some immediate field values are reserved */
        unallocated_encoding(s);
        return;
    }

    if (!sf) {
        wmask &= 0xffffffff;
    }

    switch (opc) {
    case 0x3: /* ANDS */
    case 0x0: /* AND */
        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
        is_and = true;
        break;
    case 0x1: /* ORR */
        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
        break;
    case 0x2: /* EOR */
        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
        break;
    default:
        assert(FALSE); /* must handle all above */
        break;
    }

    if (!sf && !is_and) {
        /* zero extend final result; we know we can skip this for AND
         * since the immediate had the high 32 bits clear.
         */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS */
        gen_logic_CC(sf, tcg_rd);
    }
}
/*
 * Move wide (immediate)
 *
 *  31 30 29 28         23 22 21 20             5 4    0
 * +--+-----+-------------+-----+----------------+------+
 * |sf| opc | 1 0 0 1 0 1 |  hw |     imm16      |  Rd  |
 * +--+-----+-------------+-----+----------------+------+
 *
 * sf: 0 -> 32 bit, 1 -> 64 bit
 * opc: 00 -> N, 10 -> Z, 11 -> K
 * hw: shift/16 (0,16, and sf only 32, 48)
 */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    uint64_t imm = extract32(insn, 5, 16);
    int sf = extract32(insn, 31, 1);
    int opc = extract32(insn, 29, 2);
    int pos = extract32(insn, 21, 2) << 4;
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_imm;

    if (!sf && (pos >= 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* MOVN */
    case 2: /* MOVZ */
        imm <<= pos;
        if (opc == 0) {
            imm = ~imm;
        }
        if (!sf) {
            imm &= 0xffffffffu;
        }
        tcg_gen_movi_i64(tcg_rd, imm);
        break;
    case 3: /* MOVK */
        tcg_imm = tcg_const_i64(imm);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
        tcg_temp_free_i64(tcg_imm);
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Bitfield
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);
    si = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    /* Suppress the zero-extend for !sf.  Since RI and SI are constrained
       to be smaller than bitsize, we'll never reference data outside the
       low 32-bits anyway.  */
    tcg_tmp = read_cpu_reg(s, rn, 1);

    /* Recognize simple(r) extractions.  */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
            tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
            goto done;
        } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
            tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
            return;
        }
        /* opc == 1, BFXIL fall through to deposit */
        tcg_gen_extract_i64(tcg_tmp, tcg_tmp, ri, len);
        pos = 0;
    } else {
        /* Handle the ri > si case with a deposit
         * Wd<32+s-r,32-r> = Wn<s:0>
         */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    if (opc == 0 && len < ri) {
        /* SBFM: sign extend the destination field from len to fill
           the balance of the word.  Let the deposit below insert all
           of those sign bits.  */
        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
        len = ri;
    }

    if (opc == 1) { /* BFM, BFXIL */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    } else {
        /* SBFM or UBFM: We start with zero, and we haven't modified
           any bits outside bitsize, therefore the zero-extension
           below is unneeded.  */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
        return;
    }

 done:
    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
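/* Worked example: LSR Xd, Xn, #sh is UBFM Xd, Xn, #sh, #63, so si >= ri
 * with len = 64 - sh and the code above reduces it to a single
 * tcg_gen_extract_i64; likewise SXTB is SBFM with ri = 0, si = 7, which
 * becomes tcg_gen_sextract_i64(tcg_rd, tcg_tmp, 0, 8).
 */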
/* Extract
 *   31  30  29 28         23 22   21  20  16 15    10 9    5 4    0
 * +----+------+-------------+---+----+------+--------+------+------+
 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
 * +----+------+-------------+---+----+------+--------+------+------+
 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (unlikely(imm == 0)) {
            /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
             * so an extract from bit 0 is a special case.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        } else if (rm == rn) { /* ROR */
            tcg_rm = cpu_reg(s, rm);
            if (sf) {
                tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm);
            } else {
                TCGv_i32 tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tcg_rm);
                tcg_gen_rotri_i32(tmp, tmp, imm);
                tcg_gen_extu_i32_i64(tcg_rd, tmp);
                tcg_temp_free_i32(tmp);
            }
        } else {
            tcg_rm = read_cpu_reg(s, rm, sf);
            tcg_rn = read_cpu_reg(s, rn, sf);
            tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
            tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
            tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        }
    }
}
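/* Note: EXTR with Rm == Rn is the canonical encoding of ROR (immediate),
 * which is why that case is special-cased into a rotate above instead of
 * the generic shift/shift/or sequence.
 */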
/* Data processing - immediate */
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 23, 6)) {
    case 0x20: case 0x21: /* PC-rel. addressing */
        disas_pc_rel_adr(s, insn);
        break;
    case 0x22: case 0x23: /* Add/subtract (immediate) */
        disas_add_sub_imm(s, insn);
        break;
    case 0x24: /* Logical (immediate) */
        disas_logic_imm(s, insn);
        break;
    case 0x25: /* Move wide (immediate) */
        disas_movw_imm(s, insn);
        break;
    case 0x26: /* Bitfield */
        disas_bitfield(s, insn);
        break;
    case 0x27: /* Extract */
        disas_extract(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t0, src);
            tcg_gen_extrl_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
            tcg_temp_free_i32(t0);
            tcg_temp_free_i32(t1);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}
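/* Note: the 32-bit ROR case cannot use the 64-bit rotate op, since bits
 * would rotate through the (zero) upper half; hence the truncate to i32,
 * rotate, and zero-extend back.  ASR likewise sign-extends first for
 * !sf so the 64-bit arithmetic shift sees the correct sign bits.
 */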
/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        TCGv_i64 shift_const;

        shift_const = tcg_const_i64(shift_i);
        shift_reg(dst, src, sf, shift_type, shift_const);
        tcg_temp_free_i64(shift_const);
    }
}
/* Logical (shifted register)
 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE);
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}
/*
 * Add/subtract (extended register)
 *
 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * opt: 00
 * option: extension type (see DecodeRegExtend)
 * imm3: optional shift to Rm
 *
 * Rd = Rn + LSL(extend(Rm), amount)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4) {
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
/*
 * Add/subtract (shifted register)
 *
 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
 *  imm6: Shift amount to apply to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
/* Data-processing (3 source)
 *   31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
 * +--+------+-----------+------+------+----+------+------+------+
 * |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 * +--+------+-----------+------+------+----+------+------+------+
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }

        tcg_temp_free_i64(low_bits);
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_tmp);
}
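/* Note: SMULH/UMULH take the is_high path above, computing the full
 * 128-bit product with tcg_gen_muls2/mulu2_i64 and simply discarding the
 * low 64 bits.  The accumulate forms never reach that path, because
 * op_id bit 2 is set only for the *MULH encodings (0x44 and 0x4c).
 */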
/* Add/subtract (with carry)
 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9    5 4   0
 * +--+--+--+------------------------+------+---------+------+-----+
 * |sf|op| S| 1 1 0 1 0 0 0 0        |  rm  | opcode2 |  Rn  | Rd  |
 * +--+--+--+------------------------+------+---------+------+-----+
 */
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    if (extract32(insn, 10, 6) != 0) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}
/* Conditional compare (immediate / register)
 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11 10  9   5  4 3   0
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 * |sf|op| S| 1 1 0 1 0 0 1 0        |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND.  */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
    arm_free_cc(&c);

    /* Load the arguments for the new comparison.  */
    if (is_imm) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison.  */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }
    tcg_temp_free_i64(tcg_tmp);

    /* If COND was false, force the flags to #nzcv.  Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, we can make do with just T1.
     * In either case, allow the tcg optimizer to delete any unused mask.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
    tcg_temp_free_i32(tcg_t0);
    tcg_temp_free_i32(tcg_t1);
    tcg_temp_free_i32(tcg_t2);
}
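/* Worked example of the mask trick above: T0 = !COND, T1 = -T0, T2 = T0-1.
 * COND true:  T0 = 0, T1 = 0,  T2 = -1, so every OR/ANDC/AND above is a
 *             no-op and the flags from the fresh comparison survive.
 * COND false: T0 = 1, T1 = -1, T2 = 0, so each flag word is forced to the
 *             value encoding the corresponding #nzcv bit instead.
 */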
/* Conditional select
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 * +----+----+---+-----------------+------+------+-----+------+------+
 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+---+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_const_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET & CSETM.  */
        tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
        if (else_inv) {
            tcg_gen_neg_i64(tcg_rd, tcg_rd);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        if (else_inv && else_inc) {
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) {
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) {
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    tcg_temp_free_i64(zero);
    a64_free_cc(&c);

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
/* REV with sf==1, opcode==3 ("REV64") */
static void handle_rev64(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    if (!sf) {
        unallocated_encoding(s);
        return;
    }
    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
}
/* REV with sf==0, opcode==2
 * REV32 (sf==1, opcode==2)
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);

    if (sf) {
        TCGv_i64 tcg_tmp = tcg_temp_new_i64();
        TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

        /* bswap32_i64 requires zero high word */
        tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
        tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
        tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);

        tcg_temp_free_i64(tcg_tmp);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
        tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
    }
}
/* REV16 (opcode==1) */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);

    tcg_temp_free_i64(mask);
    tcg_temp_free_i64(tcg_tmp);
}
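/* Worked example: for REV16 with input 0xAABBCCDD (32-bit case),
 * tcg_tmp = (x >> 8) & 0x00ff00ff = 0x00AA00CC and
 * tcg_rd  = (x & 0x00ff00ff) << 8 = 0xBB00DD00; OR-ing gives 0xBBAADDCC,
 * i.e. the bytes swapped within each 16-bit halfword.
 */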
/* Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, rn, rd;

    if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 0: /* RBIT */
        handle_rbit(s, sf, rn, rd);
        break;
    case 1: /* REV16 */
        handle_rev16(s, sf, rn, rd);
        break;
    case 2: /* REV32 */
        handle_rev32(s, sf, rn, rd);
        break;
    case 3: /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case 4: /* CLZ */
        handle_clz(s, sf, rn, rd);
        break;
    case 5: /* CLS */
        handle_cls(s, sf, rn, rd);
        break;
    }
}
static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        tcg_n = new_tmp_a64(s);
        tcg_m = new_tmp_a64(s);
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
/* LSLV, LSRV, ASRV, RORV */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
    tcg_temp_free_i64(tcg_shift);
}
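/* Note: the AND with 63 (or 31) above implements the architectural rule
 * that variable shifts use only the low bits of Rm (shift modulo the
 * register width), which also guarantees the in-range precondition that
 * shift_reg() documents.
 */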
/* CRC32[BHWX], CRC32C[BHWX] */
static void handle_crc32(DisasContext *s,
                         unsigned int sf, unsigned int sz, bool crc32c,
                         unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_acc, tcg_val;
    TCGv_i32 tcg_bytes;

    if (!arm_dc_feature(s, ARM_FEATURE_CRC)
        || (sf == 1 && sz != 3)
        || (sf == 0 && sz == 3)) {
        unallocated_encoding(s);
        return;
    }

    if (sz == 3) {
        tcg_val = cpu_reg(s, rm);
    } else {
        uint64_t mask;
        switch (sz) {
        case 0:
            mask = 0xFF;
            break;
        case 1:
            mask = 0xFFFF;
            break;
        case 2:
            mask = 0xFFFFFFFF;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_val = new_tmp_a64(s);
        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
    }

    tcg_acc = cpu_reg(s, rn);
    tcg_bytes = tcg_const_i32(1 << sz);

    if (crc32c) {
        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    } else {
        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    }

    tcg_temp_free_i32(tcg_bytes);
}
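/* Note: the CRC helpers take a 64-bit value plus an explicit byte count
 * (1 << sz), so the narrower byte/halfword/word variants just mask the
 * input down to the low 8/16/32 bits first.
 */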
/* Data-processing (2 source)
 *   31   30  29 28             21 20  16 15    10 9    5 4    0
 * +----+---+---+-----------------+------+--------+------+------+
 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd;
    sf = extract32(insn, 31, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 */
    {
        int sz = extract32(opcode, 0, 2);
        bool crc32c = extract32(opcode, 2, 1);
        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
        break;
    }
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Data processing - register */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 5)) {
    case 0x0a: /* Logical (shifted register) */
        disas_logic_reg(s, insn);
        break;
    case 0x0b: /* Add/subtract */
        if (insn & (1 << 21)) { /* (extended register) */
            disas_add_sub_ext_reg(s, insn);
        } else {
            disas_add_sub_reg(s, insn);
        }
        break;
    case 0x1b: /* Data-processing (3 source) */
        disas_data_proc_3src(s, insn);
        break;
    case 0x1a:
        switch (extract32(insn, 21, 3)) {
        case 0x0: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;
        case 0x2: /* Conditional compare */
            disas_cc(s, insn); /* both imm and reg forms */
            break;
        case 0x4: /* Conditional select */
            disas_cond_select(s, insn);
            break;
        case 0x6: /* Data-processing */
            if (insn & (1 << 30)) { /* (1 source) */
                disas_data_proc_1src(s, insn);
            } else {            /* (2 source) */
                disas_data_proc_2src(s, insn);
            }
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
static void handle_fp_compare(DisasContext *s, bool is_double,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    if (is_double) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_const_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i64(tcg_vn);
        tcg_temp_free_i64(tcg_vm);
    } else {
        TCGv_i32 tcg_vn, tcg_vm;

        tcg_vn = read_fp_sreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_const_i32(0);
        } else {
            tcg_vm = read_fp_sreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i32(tcg_vn);
        tcg_temp_free_i32(tcg_vm);
    }

    tcg_temp_free_ptr(fpst);

    gen_set_nzcv(tcg_flags);

    tcg_temp_free_i64(tcg_flags);
}
/* Floating point compare
 *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    if (mos || op || op2r || type > 1) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
}
/* Floating point conditional compare
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGv_i64 tcg_flags;
    TCGLabel *label_continue = NULL;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos || type > 1) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cond < 0x0e) { /* not always */
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        tcg_flags = tcg_const_i64(nzcv << 28);
        gen_set_nzcv(tcg_flags);
        tcg_temp_free_i64(tcg_flags);
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, type, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}
/* Floating point conditional select
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 1 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false, t_zero;
    DisasCompare64 c;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos || type > 1) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Zero extend sreg inputs to 64 bits now.  */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, type ? MO_64 : MO_32);
    read_vec_element(s, t_false, rm, 0, type ? MO_64 : MO_32);

    a64_test_cc(&c, cond);
    t_zero = tcg_const_i64(0);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
    tcg_temp_free_i64(t_zero);
    tcg_temp_free_i64(t_false);
    a64_free_cc(&c);

    /* Note that sregs write back zeros to the high bits,
       and we've already done the zero-extension.  */
    write_fp_dreg(s, rd, t_true);
    tcg_temp_free_i64(t_true);
}
/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = tcg_temp_new_i32();
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    read_vec_element_i32(s, tcg_op, rn, 0, MO_16);

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
        break;
    case 0x2: /* FNEG */
        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
        break;
    case 0x3: /* FSQRT */
        gen_helper_sqrt_f16(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
        fpst = get_fpstatus_ptr(true);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX */
        fpst = get_fpstatus_ptr(true);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        fpst = get_fpstatus_ptr(true);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_sreg(s, rd, tcg_res);

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst;
    TCGv_i32 tcg_op;
    TCGv_i32 tcg_res;

    fpst = get_fpstatus_ptr(false);
    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_rints(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX */
        gen_helper_rints_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        gen_helper_rints(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst;
    TCGv_i64 tcg_op;
    TCGv_i64 tcg_res;

    switch (opcode) {
    case 0x0: /* FMOV */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    fpst = get_fpstatus_ptr(false);
    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_rintd(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX */
        gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op);
    tcg_temp_free_i64(tcg_res);
}
static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        } else {
            /* Single to half */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, cpu_env);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    case 0x1:
    {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
        } else {
            /* Double to half */
            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, cpu_env);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
        }
        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        break;
    }
    case 0x3:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, cpu_env);
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    default:
        abort();
    }
}
/* Floating point data-processing (1 source)
 *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }
    case 0x0 ... 0x3:
    case 0x8 ... 0xf:
        /* 32-to-32 and 64-to-64 ops */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3:
            if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
                unallocated_encoding(s);
                return;
            }

            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            unallocated_encoding(s);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Floating-point data-processing (2 source) - single precision */
static void handle_fp_2src_single(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr(false);
    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negs(tcg_res, tcg_res);
        break;
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}
/* Floating-point data-processing (2 source) - double precision */
static void handle_fp_2src_double(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i64();
    fpst = get_fpstatus_ptr(false);
    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negd(tcg_res, tcg_res);
        break;
    }

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res);
}
/* Floating point data-processing (2 source)
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_fp_2src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int opcode = extract32(insn, 12, 4);

    if (opcode > 8) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_single(s, opcode, rd, rn, rm);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_double(s, opcode, rd, rn, rm);
        break;
    default:
        unallocated_encoding(s);
    }
}
/* Floating-point data-processing (3 source) - single precision */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negs(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
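/* For reference (per the A64 encoding): the o1:o0 combinations decoded by
 * disas_fp_3src() below are FMADD (00), FMSUB (01), FNMADD (10) and
 * FNMSUB (11); the negation pattern above implements exactly those four
 * forms in both the single- and double-precision handlers.
 */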
/* Floating-point data-processing (3 source) - double precision */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_op3);
    tcg_temp_free_i64(tcg_res);
}
/* Floating point data-processing (3 source)
 *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 */
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    bool o0 = extract32(insn, 15, 1);
    bool o1 = extract32(insn, 21, 1);

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
        break;
    default:
        unallocated_encoding(s);
    }
}
/* The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
static uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
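/* Worked example (illustrative): imm8 = 0x70 has sign 0, bit 6 set and
 * mantissa bits 110000. For MO_32 this expands to
 * 0x3e00 | (0x30 << 3) = 0x3f80, shifted left 16 -> 0x3f800000, i.e. 1.0f;
 * for MO_64 the same imm8 gives 0x3ff0000000000000, i.e. 1.0.
 */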
/* Floating point immediate
 *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm8 = extract32(insn, 13, 8);
    int is_double = extract32(insn, 22, 2);
    uint64_t imm;
    TCGv_i64 tcg_res;

    if (is_double > 1) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    imm = vfp_expand_imm(MO_32 + is_double, imm8);

    tcg_res = tcg_const_i64(imm);
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
/* Handle floating point <=> fixed point conversions. Note that we can
 * also deal with fp <=> integer conversions as a special case (scale == 64)
 * OPTME: consider handling that special case specially or at least skipping
 * the call to scalbn in the helpers for zero shifts.
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    bool is_double = type;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift;

    tcg_fpstatus = get_fpstatus_ptr(false);

    tcg_shift = tcg_const_i32(64 - scale);

    if (itof) {
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        if (is_double) {
            TCGv_i64 tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_double);
        } else {
            TCGv_i32 tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
        }
    } else {
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so FCVTA[US] is a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);

        if (is_double) {
            TCGv_i64 tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            tcg_temp_free_i64(tcg_double);
        } else {
            TCGv_i32 tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
        }

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);

        if (!sf) {
            tcg_gen_ext32u_i64(tcg_int, tcg_int);
        }
    }

    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
}
/* Floating point <-> fixed point conversions
 *   31 30 29 28       24 23  22  21 20   19 18    16 15    10 9    5 4    0
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    if (sbit || (type > 1)
        || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}
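/* Illustrative example: SCVTF Dd, Xn, #16 encodes scale = 48, so
 * handle_fpfpcvt() passes a shift of 64 - 48 = 16 to the helper and the
 * source integer is interpreted as having 16 fraction bits; the plain
 * fp <-> integer path (disas_fp_int_conv) instead passes scale = 64,
 * i.e. a shift of zero.
 */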
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion.
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);

        switch (type) {
        case 0:
        {
            /* 32 bit */
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            tcg_gen_st_i64(tmp, cpu_env, fp_reg_offset(s, rd, MO_64));
            tcg_gen_movi_i64(tmp, 0);
            tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
            tcg_temp_free_i64(tmp);
            break;
        }
        case 1:
        {
            /* 64 bit */
            TCGv_i64 tmp = tcg_const_i64(0);
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_offset(s, rd, MO_64));
            tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
            tcg_temp_free_i64(tmp);
            break;
        }
        case 2:
            /* 64 bit to top half. */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
            break;
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        }
    }
}
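/* Illustrative example: FMOV Xd, Vn.D[1] is the type == 2, itof == false
 * case above, reading the upper 64 bits of the vector register without
 * any conversion.
 */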
/* Floating point <-> integer conversions
 *   31 30 29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);

    if (sbit) {
        unallocated_encoding(s);
        return;
    }

    if (opcode > 5) {
        /* FMOV */
        bool itof = opcode & 1;

        if (rmode >= 2) {
            unallocated_encoding(s);
            return;
        }

        switch (sf << 3 | type << 1 | rmode) {
        case 0x0: /* 32 bit */
        case 0xa: /* 64 bit */
        case 0xd: /* 64 bit to top half of quad */
            break;
        default:
            /* all other sf/type/rmode combinations are invalid */
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_fmov(s, rd, rn, type, itof);
    } else {
        /* actual FP conversions */
        bool itof = extract32(opcode, 1, 1);

        if (type > 1 || (rmode != 0 && opcode > 1)) {
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
    }
}
/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
 *   31  30  29 28     25 24                          0
 * +---+---+---+---------+-----------------------------+
 * |   | 0 |   | 1 1 1 1 |                             |
 * +---+---+---+---------+-----------------------------+
 */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        /* Floating point data-processing (3 source) */
        disas_fp_3src(s, insn);
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            disas_fp_2src(s, insn);
            break;
        case 3:
            /* Floating point conditional select */
            disas_fp_csel(s, insn);
            break;
        case 0:
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}
static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
                     int pos)
{
    /* Extract 64 bits from the middle of two concatenated 64 bit
     * vector register slices left:right. The extracted bits start
     * at 'pos' bits into the right (least significant) side.
     * We return the result in tcg_right, and guarantee not to
     * trash tcg_left.
     */
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    assert(pos > 0 && pos < 64);

    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);

    tcg_temp_free_i64(tcg_tmp);
}
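/* Illustrative example: with pos == 8 the value assembled into tcg_right
 * is (left << 56) | (right >> 8), i.e. bits [71:8] of the 128-bit
 * concatenation left:right.
 */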
/* EXT
 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3;
    TCGv_i64 tcg_resl, tcg_resh;

    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Vd gets bits starting at pos bits into Vm:Vn. This is
     * either extracting 128 bits from a 128:128 concatenation, or
     * extracting 64 bits from a 64:64 concatenation.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
        tcg_gen_movi_i64(tcg_resh, 0);
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        if (pos >= 64) {
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
            tcg_temp_free_i64(tcg_hh);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
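/* Illustrative example: for the 128-bit form, EXT Vd.16B, Vn.16B, Vm.16B,
 * #3 sets pos = 24 and produces Vn bytes 3..15 followed by Vm bytes 0..2.
 */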
/* TBL/TBX
 *   31  30 29         24 23 22  21 20  16 15 14 13  12  11 10 9    5 4    0
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tblx = extract32(insn, 12, 1);
    int len = extract32(insn, 13, 2);
    TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
    TCGv_i32 tcg_regno, tcg_numregs;

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* This does a table lookup: for every byte element in the input
     * we index into a table formed from up to four vector registers,
     * and then the output is the result of the lookups. Our helper
     * function does the lookup operation for a single 64 bit part of
     * the input.
     */
    tcg_resl = tcg_temp_new_i64();
    tcg_resh = tcg_temp_new_i64();

    if (is_tblx) {
        read_vec_element(s, tcg_resl, rd, 0, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resl, 0);
    }
    if (is_tblx && is_q) {
        read_vec_element(s, tcg_resh, rd, 1, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resh, 0);
    }

    tcg_idx = tcg_temp_new_i64();
    tcg_regno = tcg_const_i32(rn);
    tcg_numregs = tcg_const_i32(len + 1);
    read_vec_element(s, tcg_idx, rm, 0, MO_64);
    gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
                        tcg_regno, tcg_numregs);
    if (is_q) {
        read_vec_element(s, tcg_idx, rm, 1, MO_64);
        gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
                            tcg_regno, tcg_numregs);
    }
    tcg_temp_free_i64(tcg_idx);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_numregs);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
/* ZIP/UZP/TRN
 *   31  30 29         24 23  22  21 20   16 15 14 12 11 10 9    5 4    0
 * +---+---+-------------+------+---+------+---+------------------+------+
 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
 * +---+---+-------------+------+---+------+---+------------------+------+
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
     * bit 2 indicates 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resl = tcg_const_i64(0);
    tcg_resh = tcg_const_i64(0);
    tcg_res = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
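/* Illustrative lane mappings for the 8B variants: UZP1 takes the
 * even-numbered elements of the n:m concatenation, ZIP1 interleaves
 * d = {n[0], m[0], n[1], m[1], ...} and TRN1 gives
 * d = {n[0], m[0], n[2], m[2], ...}.
 */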
/*
 * do_reduction_op helper
 *
 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
 * important for correct NaN propagation that we do these
 * operations in exactly the order specified by the pseudocode.
 *
 * This is a recursive function, TCG temps should be freed by the
 * calling function once it is done with the values.
 */
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        int element;
        TCGMemOp msize = esize == 16 ? MO_16 : MO_32;
        TCGv_i32 tcg_elem;

        /* We should have one register left here */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_hi);
        tcg_temp_free_i32(tcg_lo);
        return tcg_res;
    }
}
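/* Illustrative example: a 4-element reduction starts with vmap = 0b1111;
 * the first split gives vmap_lo = 0b0011 and vmap_hi = 0b1100, and the
 * recursion bottoms out when a single bit (one register element) remains.
 */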
/* AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of size field encodes min vs max and the actual size
         * depends on the encoding of the U bit. If not set (and FP16
         * enabled) then we do half-precision float instead of single
         * precision.
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector
     * to produce a single result. We can guarantee that a 64
     * bit intermediate is sufficient:
     * + for [US]ADDLV the maximum element size is 32 bits, and
     *   the result type is 64 bits
     * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
     *   same as the element size, which is 32 bits at most
     * For the integer operations we can choose to work at 64
     * or 32 bits and truncate at the end; for simplicity
     * we use 64 bits always. The floating point
     * ops do require 32 bit intermediates, though.
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_res,
                                    tcg_res, tcg_elt, tcg_res, tcg_elt);
                break;
            case 0x1a: /* SMINV / UMINV */
                tcg_gen_movcond_i64(is_u ? TCG_COND_LEU : TCG_COND_LE,
                                    tcg_res,
                                    tcg_res, tcg_elt, tcg_res, tcg_elt);
                break;
            default:
                g_assert_not_reached();
            }
        }
    } else {
        /* Floating point vector reduction ops which work across 32
         * bit (single) or 16 bit (half-precision) intermediates.
         * Note that correct NaN propagation requires that we do these
         * operations in exactly the order specified by the pseudocode.
         */
        TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
        tcg_temp_free_i32(tcg_res32);
        tcg_temp_free_ptr(fpst);
    }

    tcg_temp_free_i64(tcg_elt);

    /* Now truncate the result to the width required for the final output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2*esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
/* DUP (Element, Vector)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    int index = imm5 >> (size + 1);

    if (size > 3 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
                         vec_reg_offset(s, rn, index, size),
                         is_q ? 16 : 8, vec_full_reg_size(s));
}
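/* Illustrative decode example: imm5 = 0b10100 gives size = 2 (32-bit
 * elements) and index = imm5 >> 3 = 2, i.e. DUP Vd.4S, Vn.S[2] for the
 * Q variant.
 */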
/* DUP (element, scalar)
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 */
static void handle_simd_dupes(DisasContext *s, int rd, int rn,
                              int imm5)
{
    int size = ctz32(imm5);
    int index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);

    /* This instruction just extracts the specified element and
     * zero-extends it into the bottom of the destination register.
     */
    tmp = tcg_temp_new_i64();
    read_vec_element(s, tmp, rn, index, size);
    write_fp_dreg(s, rd, tmp);
    tcg_temp_free_i64(tmp);
}
/* DUP (General)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    uint32_t dofs, oprsz, maxsz;

    if (size > 3 || ((size == 3) && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dofs = vec_full_reg_offset(s, rd);
    oprsz = is_q ? 16 : 8;
    maxsz = vec_full_reg_size(s);

    tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
}
/* INS (Element)
 *
 *  31                   21 20    16 15  14    11  10 9    5 4    0
 * +-----------------------+--------+------------+---+------+------+
 * | 0 1 1 0 1 1 1 0 0 0 0 |  imm5  | 0 |  imm4  | 1 |  Rn  |  Rd  |
 * +-----------------------+--------+------------+---+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_inse(DisasContext *s, int rd, int rn,
                             int imm4, int imm5)
{
    int size = ctz32(imm5);
    int src_index, dst_index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dst_index = extract32(imm5, 1+size, 5);
    src_index = extract32(imm4, size, 4);

    tmp = tcg_temp_new_i64();

    read_vec_element(s, tmp, rn, src_index, size);
    write_vec_element(s, tmp, rd, dst_index, size);

    tcg_temp_free_i64(tmp);
}
/* INS (General)
 *
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 1 1 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
{
    int size = ctz32(imm5);
    int idx;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    idx = extract32(imm5, 1 + size, 4 - size);
    write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
}
/* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness)
 *
 *  31  30   29              21 20    16 15 12   10 9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 1 U 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * U: unsigned when set
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
                                  int rn, int rd, int imm5)
{
    int size = ctz32(imm5);
    int element;
    TCGv_i64 tcg_rd;

    /* Check for UnallocatedEncodings */
    if (is_signed) {
        if (size > 2 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        if (size > 3
            || (size < 3 && is_q)
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    element = extract32(imm5, 1+size, 4);

    tcg_rd = cpu_reg(s, rd);
    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
    if (is_signed && !is_q) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
/* AdvSIMD copy
 *   31  30  29  28             21 20  16 15 14  11 10  9    5 4    0
 * +---+---+----+-----------------+------+---+------+---+------+------+
 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +---+---+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int op = extract32(insn, 29, 1);
    int is_q = extract32(insn, 30, 1);
    int imm5 = extract32(insn, 16, 5);

    if (op) {
        if (is_q) {
            /* INS (element) */
            handle_simd_inse(s, rd, rn, imm4, imm5);
        } else {
            unallocated_encoding(s);
        }
    } else {
        switch (imm4) {
        case 0:
            /* DUP (element - vector) */
            handle_simd_dupe(s, is_q, rd, rn, imm5);
            break;
        case 1:
            /* DUP (general) */
            handle_simd_dupg(s, is_q, rd, rn, imm5);
            break;
        case 3:
            if (is_q) {
                /* INS (general) */
                handle_simd_insg(s, rd, rn, imm5);
            } else {
                unallocated_encoding(s);
            }
            break;
        case 5:
        case 7:
            /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
            handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
    }
}
/* AdvSIMD modified immediate
 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 *
 * There are a number of operations that can be carried out here:
 *   MOVI - move (shifted) imm into register
 *   MVNI - move inverted (shifted) imm into register
 *   ORR  - bitwise OR of (shifted) imm with register
 *   BIC  - bitwise clear of (shifted) imm with register
 * With ARMv8.2 we also have:
 *   FMOV half-precision
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int cmode_3_1 = extract32(cmode, 1, 3);
    int cmode_0 = extract32(cmode, 0, 1);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        /* Check for FMOV (vector, immediate) - half-precision */
        if (!(arm_dc_feature(s, ARM_FEATURE_V8_FP16) && o2 && cmode == 0xf)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* See AdvSIMDExpandImm() in ARM ARM */
    switch (cmode_3_1) {
    case 0: /* Replicate(Zeros(24):imm8, 2) */
    case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
    case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
    case 3: /* Replicate(imm8:Zeros(24), 2) */
    {
        int shift = cmode_3_1 * 8;
        imm = bitfield_replicate(abcdefgh << shift, 32);
        break;
    }
    case 4: /* Replicate(Zeros(8):imm8, 4) */
    case 5: /* Replicate(imm8:Zeros(8), 4) */
    {
        int shift = (cmode_3_1 & 0x1) * 8;
        imm = bitfield_replicate(abcdefgh << shift, 16);
        break;
    }
    case 6:
        if (cmode_0) {
            /* Replicate(Zeros(8):imm8:Ones(16), 2) */
            imm = (abcdefgh << 16) | 0xffff;
        } else {
            /* Replicate(Zeros(16):imm8:Ones(8), 2) */
            imm = (abcdefgh << 8) | 0xff;
        }
        imm = bitfield_replicate(imm, 32);
        break;
    case 7:
        if (!cmode_0 && !is_neg) {
            imm = bitfield_replicate(abcdefgh, 8);
        } else if (!cmode_0 && is_neg) {
            int i;
            imm = 0;
            for (i = 0; i < 8; i++) {
                if ((abcdefgh) & (1 << i)) {
                    imm |= 0xffULL << (i * 8);
                }
            }
        } else if (cmode_0) {
            if (is_neg) {
                imm = (abcdefgh & 0x3f) << 48;
                if (abcdefgh & 0x80) {
                    imm |= 0x8000000000000000ULL;
                }
                if (abcdefgh & 0x40) {
                    imm |= 0x3fc0000000000000ULL;
                } else {
                    imm |= 0x4000000000000000ULL;
                }
            } else {
                if (o2) {
                    /* FMOV (vector, immediate) - half-precision */
                    imm = vfp_expand_imm(MO_16, abcdefgh);
                    /* now duplicate across the lanes */
                    imm = bitfield_replicate(imm, 16);
                } else {
                    imm = (abcdefgh & 0x3f) << 19;
                    if (abcdefgh & 0x80) {
                        imm |= 0x80000000;
                    }
                    if (abcdefgh & 0x40) {
                        imm |= 0x3e000000;
                    } else {
                        imm |= 0x40000000;
                    }
                    imm |= (imm << 32);
                }
            }
        }
        break;
    default:
        fprintf(stderr, "%s: cmode_3_1: %x\n", __func__, cmode_3_1);
        g_assert_not_reached();
    }

    if (cmode_3_1 != 7 && is_neg) {
        imm = ~imm;
    }

    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
        /* MOVI or MVNI, with MVNI negation handled above.  */
        tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), is_q ? 16 : 8,
                            vec_full_reg_size(s), imm);
    } else {
        /* ORR or BIC, with BIC negation to AND handled above.  */
        if (is_neg) {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
        } else {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
        }
    }
}
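/* Illustrative example: cmode = 0b0010 with imm8 = 0xab selects
 * cmode_3_1 = 1, so imm becomes bitfield_replicate(0xab << 8, 32), i.e.
 * 0x0000ab000000ab00, a MOVI of 32-bit lanes.
 */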
/* AdvSIMD scalar copy
 *  31 30  29  28             21 20  16 15  14  11 10 9    5 4    0
 * +-----+----+-----------------+------+---+------+---+------+------+
 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +-----+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int imm5 = extract32(insn, 16, 5);
    int op = extract32(insn, 29, 1);

    if (op != 0 || imm4 != 0) {
        unallocated_encoding(s);
        return;
    }

    /* DUP (element, scalar) */
    handle_simd_dupes(s, rd, rn, imm5);
}
/* AdvSIMD scalar pairwise
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but size[1] is always 1 for valid
     * encodings.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        fpst = NULL;
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op, size[0] is 32 or 64 bit */
        if (!u) {
            if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
                unallocated_encoding(s);
                return;
            } else {
                size = MO_16;
            }
        } else {
            size = extract32(size, 0, 1) ? MO_64 : MO_32;
        }

        if (!fp_access_check(s)) {
            return;
        }

        fpst = get_fpstatus_ptr(size == MO_16);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == MO_64) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, size);
        read_vec_element_i32(s, tcg_op2, rn, 1, size);

        if (size == MO_16) {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_res);
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
/*
 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 *
 * This code handles the common shifting code and is used by both
 * the vector and scalar code.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_res */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
            tcg_temp_free_i64(tcg_zero);
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }

    if (extended_result) {
        tcg_temp_free_i64(tcg_src_hi);
    }
}
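/* Illustrative example: URSHR with shift = 3 on an element value of 12
 * adds the rounding constant 1 << 2 = 4 first, giving (12 + 4) >> 3 = 2,
 * where plain USHR would give 12 >> 3 = 1.
 */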
/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* shift count same as element size is valid but does nothing;
         * special case to avoid potential shift by 64.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
}
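/* Illustrative decode example: the scalar forms require immh<3> set, so
 * immh:immb ranges from 0b1000000 (64) to 0b1111111 (127) and
 * shift = 128 - immhb yields shift amounts of 64 down to 1.
 */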
/* SHL/SLI - Scalar shift left */
static void handle_scalar_simd_shli(DisasContext *s, bool insert,
                                    int immh, int immb, int opcode,
                                    int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
    } else {
        tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
}
/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
 * (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_const_i64(0);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i32(tcg_rd_narrowed);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
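
/* The narrowing loop above packs each narrowed element into tcg_final
 * with a deposit at bit offset esize * i; is_q then selects whether the
 * packed 64 bits are written to the low or the high half of Rd, which
 * is how the "2" forms (e.g. SQSHRN2) target the upper half of the
 * destination vector.
 */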
/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar) {
        if (!is_q && extract32(immh, 3, 1)) {
            unallocated_encoding(s);
            return;
        }

        /* Since we use the variable-shift helpers we must
         * replicate the shift count into each element of
         * the tcg_shift value.
         */
        switch (size) {
        case 0:
            shift |= shift << 8;
            /* fall through */
        case 1:
            shift |= shift << 16;
            break;
        case 2:
        case 3:
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_shift = tcg_const_i64(shift);
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_op);
        }
        tcg_temp_free_i64(tcg_shift);
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_const_i32(shift);
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        TCGMemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            if (scalar) {
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
        }
        tcg_temp_free_i32(tcg_shift);

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
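
/* Note the shift-count replication above: the variable-shift Neon
 * helpers shift every lane of their input by the count found in the
 * corresponding lane of the shift operand, so the immediate must be
 * broadcast into each 8- or 16-bit lane first; e.g. for size 0 a count
 * of 3 becomes 0x03030303.
 */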
/* Common vector code for handling integer to FP conversion */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16);
    TCGv_i32 tcg_shift = NULL;

    TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    if (fracbits || size == MO_64) {
        tcg_shift = tcg_const_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }

        tcg_temp_free_i64(tcg_int64);
        tcg_temp_free_i64(tcg_double);

    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }

        tcg_temp_free_i32(tcg_int32);
        tcg_temp_free_i32(tcg_float);
    }

    tcg_temp_free_ptr(tcg_fpst);
    if (tcg_shift) {
        tcg_temp_free_i32(tcg_shift);
    }

    clear_vec_high(s, elements << size == 16, rd);
}
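
/* fracbits above is the number of fractional bits in the fixed-point
 * input: the shift-taking helpers compute value / 2^fracbits, so
 * fracbits == 0 degenerates to a plain integer-to-float conversion
 * (and for the 32- and 16-bit cases the cheaper sitos/uitos-style
 * helpers without a shift argument are used instead).
 */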
/* UCVTF/SCVTF - Integer to FP conversion */
static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int opcode,
                                         int rn, int rd)
{
    bool is_double = extract32(immh, 3, 1);
    int size = is_double ? MO_64 : MO_32;
    int elements;
    int immhb = immh << 3 | immb;
    int fracbits = (is_double ? 128 : 64) - immhb;

    if (!extract32(immh, 2, 2)) {
        unallocated_encoding(s);
        return;
    }

    if (is_scalar) {
        elements = 1;
    } else {
        elements = is_double ? 2 : is_q ? 4 : 2;
        if (is_double && !is_q) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* immh == 0 would be a failure of the decode logic */
    g_assert(immh);

    handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
}
/* FCVTZS, FCVTZU - FP to fixed-point conversion */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    bool is_double = extract32(immh, 3, 1);
    int immhb = immh << 3 | immb;
    int fracbits = (is_double ? 128 : 64) - immhb;
    int pass;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    if (!extract32(immh, 2, 2)) {
        unallocated_encoding(s);
        return;
    }

    if (!is_scalar && !is_q && is_double) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
    tcg_fpstatus = get_fpstatus_ptr(false);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    tcg_shift = tcg_const_i32(fracbits);

    if (is_double) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        int maxpass = is_scalar ? 1 : is_q ? 4 : 2;
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
            if (is_u) {
                gen_helper_vfp_touls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }
            tcg_temp_free_i32(tcg_op);
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    /* Restore the original rounding mode before freeing the fp status
     * pointer it is generated against.
     */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
    tcg_temp_free_i32(tcg_rmode);
}
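
/* FCVTZ* always truncates, so the rounding mode is forced to
 * FPROUNDING_ZERO around the conversion: the first set_rmode call
 * installs round-to-zero and returns the previous mode into tcg_rmode,
 * and the second call at the end swaps it back.
 */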
/* AdvSIMD scalar shift by immediate
 *  31 30  29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +-----+---+-------------+------+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-------------+------+------+--------+---+------+------+
 *
 * This is the scalar version, so it works on fixed-size registers.
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF, UCVTF */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN, SQSHRUN2 */
    case 0x11: /* SQRSHRUN, SQRSHRUN2 */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
    case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS, FCVTZU */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* AdvSIMD scalar three different
 *  31 30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL, SQDMLAL2 */
    case 0xb: /* SQDMLSL, SQDMLSL2 */
    case 0xd: /* SQDMULL, SQDMULL2 */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element_i32(s, tcg_op1, rn, 0, MO_16);
        read_vec_element_i32(s, tcg_op2, rm, 0, MO_16);

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
                                              tcg_res, tcg_op3);
            tcg_temp_free_i64(tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    }
}
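
/* The doubling in SQDMULL/SQDMLAL/SQDMLSL above is done by feeding the
 * widening product to neon_addl_saturate with itself, i.e.
 * res = sat(res + res), which also sets QC on overflow; the accumulate
 * forms then perform a second saturating add against the destination
 * (negating first for SQDMLSL).
 */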
/* CMTST : test is "if ((X & Y) != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

static void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Handle 64x64->64 opcodes which are shared between the scalar
     * and vector 3-same groups. We cover every opcode where size == 3
     * is valid in either the three-reg-same (integer, not pairwise)
     * or scalar-three-reg-same groups.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT, CMHI */
        /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
         * We implement this using setcond (test) and then negating.
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST, CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* SSHL, USHL */
        if (u) {
            gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL, UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL, URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL, UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD, SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
/* Handle the 3-same-operands float operations; shared by the scalar
 * and vector encodings. The caller must filter out any encodings
 * not allocated for the encoding it is dealing with.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        } else {
            /* Single */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* scalar single so clear high part */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
                tcg_temp_free_i64(tcg_tmp);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}
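
/* FABD above has no dedicated helper: it is implemented as a subtract
 * followed by the fp abs helper clearing the sign, matching the
 * architectural definition of abs(a - b).
 */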
/* AdvSIMD scalar three same
 *  31 30  29 28       24 23  22  21 20  16 15    11  10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size[1] and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5d: /* FACGE */
        case 0x7d: /* FACGT */
        case 0x1c: /* FCMEQ */
        case 0x5c: /* FCMGE */
        case 0x7c: /* FCMGT */
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB (vector) */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
        tcg_temp_free_i64(tcg_rn);
        tcg_temp_free_i64(tcg_rm);
    } else {
        /* Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
         * no side effects for all these operations.
         * OPTME: special-purpose helpers would avoid doing some
         * unnecessary work in the helper for the 8 and 16 bit cases.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}
/* AdvSIMD scalar three same FP16
 *  31 30  29 28       24 23 22 21 20  16 15 14 13    11 10  9  5 4  0
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
 */
static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
                                                  uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 3);
    int rm = extract32(insn, 16, 5);
    bool u = extract32(insn, 29, 1);
    bool a = extract32(insn, 23, 1);
    int fpopcode = opcode | (a << 3) | (u << 4);
    TCGv_ptr fpst;
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;

    switch (fpopcode) {
    case 0x03: /* FMULX */
    case 0x04: /* FCMEQ (reg) */
    case 0x07: /* FRECPS */
    case 0x0f: /* FRSQRTS */
    case 0x14: /* FCMGE (reg) */
    case 0x15: /* FACGE */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT (reg) */
    case 0x1d: /* FACGT */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    fpst = get_fpstatus_ptr(true);

    tcg_op1 = tcg_temp_new_i32();
    tcg_op2 = tcg_temp_new_i32();
    tcg_res = tcg_temp_new_i32();

    read_vec_element_i32(s, tcg_op1, rn, 0, MO_16);
    read_vec_element_i32(s, tcg_op2, rm, 0, MO_16);

    switch (fpopcode) {
    case 0x03: /* FMULX */
        gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x04: /* FCMEQ (reg) */
        gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x07: /* FRECPS */
        gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x0f: /* FRSQRTS */
        gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x14: /* FCMGE (reg) */
        gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x15: /* FACGE */
        gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1a: /* FABD */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
        break;
    case 0x1c: /* FCMGT (reg) */
        gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1d: /* FACGT */
        gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_i32(tcg_res);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_ptr(fpst);
}
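
/* For half precision the FABD sign clear above is done with a plain
 * AND of 0x7fff rather than an fp helper, since the f16 value sits in
 * the low 16 bits of the TCG register.
 */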
/* AdvSIMD scalar three same extra
 *  31 30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
                                                   uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i32 ele1, ele2, ele3;
    TCGv_i64 res;
    int feature;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = ARM_FEATURE_V8_RDM;
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /* Do a single operation on the lowest element in the vector.
     * We use the standard Neon helpers and rely on 0 OP 0 == 0
     * with no side effects for all these operations.
     * OPTME: special-purpose helpers would avoid doing some
     * unnecessary work in the helper for the 16 bit cases.
     */
    ele1 = tcg_temp_new_i32();
    ele2 = tcg_temp_new_i32();
    ele3 = tcg_temp_new_i32();

    read_vec_element_i32(s, ele1, rn, 0, size);
    read_vec_element_i32(s, ele2, rm, 0, size);
    read_vec_element_i32(s, ele3, rd, 0, size);

    switch (opcode) {
    case 0x0: /* SQRDMLAH */
        if (size == 1) {
            gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    case 0x1: /* SQRDMLSH */
        if (size == 1) {
            gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i32(ele1);
    tcg_temp_free_i32(ele2);

    res = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(res, ele3);
    tcg_temp_free_i32(ele3);

    write_fp_dreg(s, rd, res);
    tcg_temp_free_i64(res);
}
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Handle 64->64 opcodes which are shared between the scalar and
     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
     * is valid in either group and also the double-precision fp ops.
     * The caller only needs to provide tcg_rmode and tcg_fpstatus if the op
     * requires them.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode is shared with CNT and RBIT but we have earlier
         * enforced that size == 3 if and only if this is the NOT insn.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT */
        /* 64 bit integer comparison against zero, result is
         * test ? (2^64 - 1) : 0. We implement this using setcond (test)
         * and then negating.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGT, CMGE */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
            tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
                                tcg_rn, tcg_rd);
            tcg_temp_free_i64(tcg_zero);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
    {
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
    {
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
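
/* ABS above is branchless: tcg_rd = -tcg_rn, then a movcond picks the
 * original value when tcg_rn > 0, so INT64_MIN stays INT64_MIN as the
 * architecture requires (plain ABS does not saturate; SQABS does).
 */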
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    fpst = get_fpstatus_ptr(size == MO_16);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOPFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_zero);
        tcg_temp_free_i64(tcg_op);

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_const_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOPFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_ptr(fpst);
}
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                gen_helper_recpe_u32(tcg_res, tcg_op, fpst);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
    tcg_temp_free_ptr(fpst);
}
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
     * in the source becomes a size element in the destination).
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;
    int passes = scalar ? 1 : 2;

    if (scalar) {
        tcg_res[1] = tcg_const_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, SQXTUN */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, UQXTN */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
            } else {
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();

                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, cpu_env);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, cpu_env);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
                tcg_temp_free_i32(tcg_lo);
                tcg_temp_free_i32(tcg_hi);
            }
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            /* 64 bit to 32 bit float conversion
             * with von Neumann rounding (round to odd)
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }

        tcg_temp_free_i64(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
/* Remaining saturating accumulating ops */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                TCGv_i64 tcg_zero = tcg_const_i64(0);
                write_vec_element(s, tcg_zero, rd, 0, MO_64);
                tcg_temp_free_i64(tcg_zero);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
        clear_vec_high(s, is_q, rd);
    }
}
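
/* USQADD and SUQADD add operands of opposite signedness, which is why
 * the helper names above mix the two (e.g. neon_uqadd_s64): one input
 * is treated as signed and the other as unsigned, and the result
 * saturates to the signedness of the destination accumulator.
 */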
/* AdvSIMD scalar two reg misc
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD */
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        tcg_fpstatus = get_fpstatus_ptr(false);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
        tcg_fpstatus = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, cpu_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
    }

    if (is_fcvt) {
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    uint64_t mask = (2ull << ((8 << vece) - 1)) - 1;
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh));
    tcg_gen_shri_vec(vece, t, a, sh);
    tcg_gen_and_vec(vece, d, d, m);
    tcg_gen_or_vec(vece, d, d, t);

    tcg_temp_free_vec(t);
    tcg_temp_free_vec(m);
}
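
/* The shr-insert helpers above implement SRI: the 8- and 16-bit
 * variants build a per-lane mask of the bits the shifted source will
 * occupy and merge with d = (d & ~mask) | (t & mask), while the 32-
 * and 64-bit variants can use a single deposit. Either way the top
 * `shift` bits of each destination lane are left untouched.
 */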
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    static const GVecGen2i ssra_op[4] = {
        { .fni8 = gen_ssra8_i64,
          .fniv = gen_ssra_vec,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_8 },
        { .fni8 = gen_ssra16_i64,
          .fniv = gen_ssra_vec,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_16 },
        { .fni4 = gen_ssra32_i32,
          .fniv = gen_ssra_vec,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_32 },
        { .fni8 = gen_ssra64_i64,
          .fniv = gen_ssra_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_64 },
    };
    static const GVecGen2i usra_op[4] = {
        { .fni8 = gen_usra8_i64,
          .fniv = gen_usra_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_8, },
        { .fni8 = gen_usra16_i64,
          .fniv = gen_usra_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_16, },
        { .fni4 = gen_usra32_i32,
          .fniv = gen_usra_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_32, },
        { .fni8 = gen_usra64_i64,
          .fniv = gen_usra_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_64, },
    };
    static const GVecGen2i sri_op[4] = {
        { .fni8 = gen_shr8_ins_i64,
          .fniv = gen_shr_ins_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_8 },
        { .fni8 = gen_shr16_ins_i64,
          .fniv = gen_shr_ins_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_16 },
        { .fni4 = gen_shr32_ins_i32,
          .fniv = gen_shr_ins_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_32 },
        { .fni8 = gen_shr64_ins_i64,
          .fniv = gen_shr_ins_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_64 },
    };

    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    int dsize = is_q ? 128 : 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    TCGv_i64 tcg_round;
    uint64_t round_const;
    int i;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }
    tcg_debug_assert(size <= 3);

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        if (is_u) {
            /* Shift count same as element size produces zero to add. */
            if (shift == 8 << size) {
                goto done;
            }
            gen_gvec_op2i(s, is_q, rd, rn, shift, &usra_op[size]);
        } else {
            /* Shift count same as element size produces all sign to add. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gen_gvec_op2i(s, is_q, rd, rn, shift, &ssra_op[size]);
        }
        return;
    case 0x08: /* SRI */
        /* Shift count same as element size is valid but does nothing. */
        if (shift == 8 << size) {
            goto done;
        }
        gen_gvec_op2i(s, is_q, rd, rn, shift, &sri_op[size]);
        return;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same size as element size produces zero. */
                tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
                                   is_q ? 16 : 8, vec_full_reg_size(s), 0);
            } else {
                gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size);
            }
        } else {
            /* Shift count the same size as element size produces all sign. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_sari, size);
        }
        return;

    case 0x04: /* SRSHR / URSHR (rounding) */
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = true;
        break;
    default:
        g_assert_not_reached();
    }

    round_const = 1ULL << (shift - 1);
    tcg_round = tcg_const_i64(round_const);

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, memop);
        if (accumulate) {
            read_vec_element(s, tcg_rd, rd, i, memop);
        }

        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);

        write_vec_element(s, tcg_rd, rd, i, size);
    }
    tcg_temp_free_i64(tcg_round);

 done:
    clear_vec_high(s, is_q, rd);
}
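
/* The special cases above handle shift == esize, which the encoding
 * allows: USHR produces all zeroes and USRA therefore adds nothing,
 * SSHR and SSRA behave like a shift by esize - 1 (all sign bits), and
 * SRI leaves the destination unchanged.
 */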
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    uint64_t mask = (1ull << sh) - 1;
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, m, mask);
    tcg_gen_shli_vec(vece, t, a, sh);
    tcg_gen_and_vec(vece, d, d, m);
    tcg_gen_or_vec(vece, d, d, t);

    tcg_temp_free_vec(t);
    tcg_temp_free_vec(m);
}
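
/* For SLI the preserved mask is simply the low `sh` bits of each lane
 * ((1 << sh) - 1): those bits of the destination are kept and the
 * shifted source supplies the rest, the mirror image of the SRI merge
 * further up.
 */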
/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    static const GVecGen2i shi_op[4] = {
        { .fni8 = gen_shl8_ins_i64,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_8 },
        { .fni8 = gen_shl16_ins_i64,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_shl32_ins_i32,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_shl64_ins_i64,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_64 },
    };
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (size > 3 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (insert) {
        gen_gvec_op2i(s, is_q, rd, rn, shift, &shi_op[size]);
    } else {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
    }
}
/* USHLL/SHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right now and use shifts in the main loop.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}
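/* Example: USHLL V0.8H, V1.8B, #3 zero-extends each byte of V1 to 16
 * bits and then shifts it left by 3.  The loop above extracts lane i
 * with a shift, sign- or zero-extends it via ext_and_shift_reg, applies
 * the immediate shift, and stores a double-width element.
 */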
/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size+1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size+1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
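/* In this encoding group the element size is implied by the position of
 * the most significant set bit of immh (size = 31 - clz32(immh)), and
 * the shift amount is recovered from immh:immb relative to the element
 * size; e.g. for SHRN the effective shift is (2 * esize) - immhb.
 */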
/* AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA (accumulate) */
    case 0x04: /* SRSHR / URSHR (rounding) */
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS/ FCVTZU */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
/* Generate code to do a "long" addition or subtraction, ie one done in
 * TCGv_i64 on vector lanes twice the width specified by size.
 */
static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
                          TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
{
    static NeonGenTwo64OpFn * const fns[3][2] = {
        { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
        { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
        { tcg_gen_add_i64, tcg_gen_sub_i64 },
    };
    NeonGenTwo64OpFn *genfn;
    assert(size < 3);

    genfn = fns[size][is_sub];
    genfn(tcg_res, tcg_op1, tcg_op2);
}
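/* handle_3rd_widening() folds the accumulate behaviour of the
 * three-reg-different ops into 'accop': +1 for add-accumulate (SABAL,
 * SMLAL, SQDMLAL), -1 for subtract-accumulate (SMLSL, SQDMLSL), and 0
 * for the pure widening ops, so one code path serves all of them.
 */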
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate, a subtracting accumulate,
     * or no accumulate at all?
     */
    switch (opcode) {
    case 5:
    case 8:
    case 9:
        accop = 1;
        break;
    case 10:
    case 11:
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally handle it inline.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                tcg_temp_free_i64(tcg_tmp1);
                tcg_temp_free_i64(tcg_tmp2);
                break;
            }
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }

            if (accop != 0) {
                tcg_temp_free_i64(tcg_passres);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* size 0 or 1, generally helper functions */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                tcg_temp_free_i64(tcg_op2_64);
                break;
            }
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            case 14: /* PMULL */
                assert(size == 0);
                gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
                tcg_temp_free_i64(tcg_passres);
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_temp_free_i32(tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2_wide);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
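/* Rounded narrow-high of a 64-bit value: adding 1 << 31 before taking
 * the high 32 bits rounds the discarded low half to nearest, half
 * upwards.
 */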
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
    tcg_gen_extrh_i64_i32(res, in);
}
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
        tcg_temp_free_i64(tcg_wideres);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
{
    /* PMULL of 64 x 64 -> 128 is an odd special case because it
     * is the only three-reg-diff instruction which produces a
     * 128-bit wide result from a single operation. However since
     * it's possible to calculate the two halves more or less
     * separately we just use two helper calls.
     */
    TCGv_i64 tcg_op1 = tcg_temp_new_i64();
    TCGv_i64 tcg_op2 = tcg_temp_new_i64();
    TCGv_i64 tcg_res = tcg_temp_new_i64();

    read_vec_element(s, tcg_op1, rn, is_q, MO_64);
    read_vec_element(s, tcg_op2, rm, is_q, MO_64);
    gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
    write_vec_element(s, tcg_res, rd, 0, MO_64);
    gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
    write_vec_element(s, tcg_res, rd, 1, MO_64);

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res);
}
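/* PMULL is a carry-less (polynomial, GF(2)) multiplication: partial
 * products are XORed rather than added, which is why dedicated helpers
 * are used here instead of an integer multiply.
 */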
/* AdvSIMD three different
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    /* Instructions in this group fall into three basic classes
     * (in each case with the operation working on each element in
     * the input vectors):
     * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
     *     128 bit input)
     * (2) wide 64 x 128 -> 128
     * (3) narrowing 128 x 128 -> 64
     * Here we do initial decode, catch unallocated cases and
     * dispatch to separate functions for each class.
     */
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
        /* 64 x 128 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
        /* 128 x 128 -> 64 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* PMULL, PMULL2 */
        if (is_u || size == 1 || size == 2) {
            unallocated_encoding(s);
            return;
        }
        if (size == 3) {
            if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_pmull_64(s, is_q, rd, rn, rm);
            return;
        }
        goto is_widening;
    case 9: /* SQDMLAL, SQDMLAL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
    case 13: /* SQDMULL, SQDMULL2 */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
    case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
    case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
    case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
    case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
    is_widening:
        if (!fp_access_check(s)) {
            return;
        }

        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}
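/* The three bitwise select ops below all reduce to two XORs and an
 * AND/ANDC, needing no temporary.  For BSL, with rd as the mask:
 *     rn ^= rm;  rn &= rd;  rd = rm ^ rn
 * yields rd = (rd & rn) | (~rd & rm) bit by bit, i.e. "select rn where
 * the mask bit is set, else rm".  BIT and BIF are the same trick with
 * the operands playing different roles.
 */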
static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rm);
    tcg_gen_and_i64(rn, rn, rd);
    tcg_gen_xor_i64(rd, rm, rn);
}

static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_and_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_andc_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rm);
    tcg_gen_and_vec(vece, rn, rn, rd);
    tcg_gen_xor_vec(vece, rd, rm, rn);
}

static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_and_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_andc_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}
/* Logic op (opcode == 3) subgroup of C3.6.16. */
static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
{
    static const GVecGen3 bsl_op = {
        .fni8 = gen_bsl_i64,
        .fniv = gen_bsl_vec,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
        .load_dest = true
    };
    static const GVecGen3 bit_op = {
        .fni8 = gen_bit_i64,
        .fniv = gen_bit_vec,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
        .load_dest = true
    };
    static const GVecGen3 bif_op = {
        .fni8 = gen_bif_i64,
        .fniv = gen_bif_vec,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
        .load_dest = true
    };

    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    if (!fp_access_check(s)) {
        return;
    }

    switch (size + 4 * is_u) {
    case 0: /* AND */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
        return;
    case 1: /* BIC */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
        return;
    case 2: /* ORR */
        if (rn == rm) { /* MOV */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_mov, 0);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
        }
        return;
    case 3: /* ORN */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
        return;
    case 4: /* EOR */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
        return;

    case 5: /* BSL bitwise select */
        gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
        return;
    case 6: /* BIT, bitwise insert if true */
        gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
        return;
    case 7: /* BIF, bitwise insert if false */
        gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
        return;

    default:
        g_assert_not_reached();
    }
}
/* Helper functions for 32 bit comparisons */
static void gen_max_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
{
    tcg_gen_movcond_i32(TCG_COND_GE, res, op1, op2, op1, op2);
}

static void gen_max_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
{
    tcg_gen_movcond_i32(TCG_COND_GEU, res, op1, op2, op1, op2);
}

static void gen_min_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
{
    tcg_gen_movcond_i32(TCG_COND_LE, res, op1, op2, op1, op2);
}

static void gen_min_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
{
    tcg_gen_movcond_i32(TCG_COND_LEU, res, op1, op2, op1, op2);
}
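/* Example lane mapping for ADDP V0.4S, V1.4S, V2.4S: the result is
 * { V1.S[0]+V1.S[1], V1.S[2]+V1.S[3], V2.S[0]+V2.S[1], V2.S[2]+V2.S[3] },
 * i.e. adjacent pairs are taken from the concatenation Vm:Vn as the
 * comment below describes.
 */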
/* Pairwise op subgroup of C3.6.16.
 *
 * This is called directly or via the handle_3same_float for float pairwise
 * operations where the opcode and size are calculated differently.
 */
static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
                                   int size, int rn, int rm, int rd)
{
    TCGv_ptr fpst;
    int pass;

    /* Floating point operations need fpst */
    if (opcode >= 0x58) {
        fpst = get_fpstatus_ptr(false);
    } else {
        fpst = NULL;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* These operations work on the concatenated rm:rn, with each pair of
     * adjacent elements being operated on to produce an element in the result.
     */
    if (size == 3) {
        TCGv_i64 tcg_res[2];

        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            int passreg = (pass == 0) ? rn : rm;

            read_vec_element(s, tcg_op1, passreg, 0, MO_64);
            read_vec_element(s, tcg_op2, passreg, 1, MO_64);
            tcg_res[pass] = tcg_temp_new_i64();

            switch (opcode) {
            case 0x17: /* ADDP */
                tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 0x58: /* FMAXNMP */
                gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5a: /* FADDP */
                gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5e: /* FMAXP */
                gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x78: /* FMINNMP */
                gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x7e: /* FMINP */
                gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    } else {
        int maxpass = is_q ? 4 : 2;
        TCGv_i32 tcg_res[4];

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (is_q && (pass & 1)) ? 2 : 0;

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (opcode) {
            case 0x17: /* ADDP */
            {
                static NeonGenTwoOpFn * const fns[3] = {
                    gen_helper_neon_padd_u8,
                    gen_helper_neon_padd_u16,
                    tcg_gen_add_i32,
                };
                genfn = fns[size];
                break;
            }
            case 0x14: /* SMAXP, UMAXP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
                    { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
                    { gen_max_s32, gen_max_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x15: /* SMINP, UMINP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
                    { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
                    { gen_min_s32, gen_min_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            /* The FP operations are all on single floats (32 bit) */
            case 0x58: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5a: /* FADDP */
                gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5e: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x78: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x7e: /* FMINP */
                gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            /* FP ops called directly, otherwise call now */
            if (genfn) {
                genfn(tcg_res[pass], tcg_op1, tcg_op2);
            }

            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }

        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
            tcg_temp_free_i32(tcg_res[pass]);
        }
        clear_vec_high(s, is_q, rd);
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
/* Floating point op subgroup of C3.6.16. */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    /* For floating point ops, the U, size[1] and opcode bits
     * together indicate the operation. size[0] indicates single
     * or double.
     */
    int fpopcode = extract32(insn, 11, 5)
        | (extract32(insn, 23, 1) << 5)
        | (extract32(insn, 29, 1) << 6);
    int is_q = extract32(insn, 30, 1);
    int size = extract32(insn, 22, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    if (size == 1 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    case 0x58: /* FMAXNMP */
    case 0x5a: /* FADDP */
    case 0x5e: /* FMAXP */
    case 0x78: /* FMINNMP */
    case 0x7e: /* FMINP */
        if (size && !is_q) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                               rn, rm, rd);
        return;
    case 0x1b: /* FMULX */
    case 0x1f: /* FRECPS */
    case 0x3f: /* FRSQRTS */
    case 0x5d: /* FACGE */
    case 0x7d: /* FACGT */
    case 0x19: /* FMLA */
    case 0x39: /* FMLS */
    case 0x18: /* FMAXNM */
    case 0x1a: /* FADD */
    case 0x1c: /* FCMEQ */
    case 0x1e: /* FMAX */
    case 0x38: /* FMINNM */
    case 0x3a: /* FSUB */
    case 0x3e: /* FMIN */
    case 0x5b: /* FMUL */
    case 0x5c: /* FCMGE */
    case 0x5f: /* FDIV */
    case 0x7a: /* FABD */
    case 0x7c: /* FCMGT */
        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}
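/* Note that the gen_mla and gen_mls expanders above deliberately clobber
 * their 'a' operand (the product is formed in place) and accumulate into
 * 'd'; this is safe because the gvec machinery hands them per-lane
 * temporaries, with .load_dest ensuring 'd' holds the old destination.
 */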
/* Integer op subgroup of C3.6.16. */
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
    static const GVecGen3 cmtst_op[4] = {
        { .fni4 = gen_helper_neon_tst_u8,
          .fniv = gen_cmtst_vec,
          .vece = MO_8 },
        { .fni4 = gen_helper_neon_tst_u16,
          .fniv = gen_cmtst_vec,
          .vece = MO_16 },
        { .fni4 = gen_cmtst_i32,
          .fniv = gen_cmtst_vec,
          .vece = MO_32 },
        { .fni8 = gen_cmtst_i64,
          .fniv = gen_cmtst_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };
    static const GVecGen3 mla_op[4] = {
        { .fni4 = gen_mla8_i32,
          .fniv = gen_mla_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_8 },
        { .fni4 = gen_mla16_i32,
          .fniv = gen_mla_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_mla32_i32,
          .fniv = gen_mla_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_mla64_i64,
          .fniv = gen_mla_vec,
          .opc = INDEX_op_mul_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_64 },
    };
    static const GVecGen3 mls_op[4] = {
        { .fni4 = gen_mls8_i32,
          .fniv = gen_mls_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_8 },
        { .fni4 = gen_mls16_i32,
          .fniv = gen_mls_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_mls32_i32,
          .fniv = gen_mls_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_mls64_i64,
          .fniv = gen_mls_vec,
          .opc = INDEX_op_mul_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_64 },
    };

    int is_q = extract32(insn, 30, 1);
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pass;
    TCGCond cond;

    switch (opcode) {
    case 0x13: /* MUL, PMUL */
        if (u && size != 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x0: /* SHADD, UHADD */
    case 0x2: /* SRHADD, URHADD */
    case 0x4: /* SHSUB, UHSUB */
    case 0xc: /* SMAX, UMAX */
    case 0xd: /* SMIN, UMIN */
    case 0xe: /* SABD, UABD */
    case 0xf: /* SABA, UABA */
    case 0x12: /* MLA, MLS */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x10: /* ADD, SUB */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
        }
        return;
    case 0x13: /* MUL, PMUL */
        if (!u) { /* MUL */
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
            return;
        }
        break;
    case 0x12: /* MLA, MLS */
        if (u) {
            gen_gvec_op3(s, is_q, rd, rn, rm, &mls_op[size]);
        } else {
            gen_gvec_op3(s, is_q, rd, rn, rm, &mla_op[size]);
        }
        return;
    case 0x11:
        if (!u) { /* CMTST */
            gen_gvec_op3(s, is_q, rd, rn, rm, &cmtst_op[size]);
            return;
        }
        /* else CMEQ */
        cond = TCG_COND_EQ;
        goto do_gvec_cmp;
    case 0x06: /* CMGT, CMHI */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
        goto do_gvec_cmp;
    case 0x07: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
    do_gvec_cmp:
        tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
                         vec_full_reg_offset(s, rn),
                         vec_full_reg_offset(s, rm),
                         is_q ? 16 : 8, vec_full_reg_size(s));
        return;
    }

    if (size == 3) {
        assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            NeonGenTwoOpEnvFn *genenvfn = NULL;

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (opcode) {
            case 0x0: /* SHADD, UHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x1: /* SQADD, UQADD */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                    { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                    { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0x2: /* SRHADD, URHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x4: /* SHSUB, UHSUB */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x5: /* SQSUB, UQSUB */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                    { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                    { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0x8: /* SSHL, USHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
                    { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
                    { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x9: /* SQSHL, UQSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xa: /* SRSHL, URSHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xb: /* SQRSHL, UQRSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xc: /* SMAX, UMAX */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
                    { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
                    { gen_max_s32, gen_max_u32 },
                };
                genfn = fns[size][u];
                break;
            }

            case 0xd: /* SMIN, UMIN */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
                    { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
                    { gen_min_s32, gen_min_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xe: /* SABD, UABD */
            case 0xf: /* SABA, UABA */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
                    { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
                    { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x13: /* MUL, PMUL */
                assert(u); /* PMUL */
                assert(size == 0);
                genfn = gen_helper_neon_mul_p8;
                break;
            case 0x16: /* SQDMULH, SQRDMULH */
            {
                static NeonGenTwoOpEnvFn * const fns[2][2] = {
                    { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                    { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
                };
                assert(size == 1 || size == 2);
                genenvfn = fns[size - 1][u];
                break;
            }
            default:
                g_assert_not_reached();
            }

            if (genenvfn) {
                genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
            } else {
                genfn(tcg_res, tcg_op1, tcg_op2);
            }

            if (opcode == 0xf) {
                /* SABA, UABA: accumulating ops */
                static NeonGenTwoOpFn * const fns[3] = {
                    gen_helper_neon_add_u8,
                    gen_helper_neon_add_u16,
                    tcg_gen_add_i32,
                };

                read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
                fns[size](tcg_res, tcg_op1, tcg_res);
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }
    clear_vec_high(s, is_q, rd);
}
/* AdvSIMD three same
 *  31  30  29  28       24 23  22  21 20  16 15    11  10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 5);

    switch (opcode) {
    case 0x3: /* logic ops */
        disas_simd_3same_logic(s, insn);
        break;
    case 0x17: /* ADDP */
    case 0x14: /* SMAXP, UMAXP */
    case 0x15: /* SMINP, UMINP */
    {
        /* Pairwise operations */
        int is_q = extract32(insn, 30, 1);
        int u = extract32(insn, 29, 1);
        int size = extract32(insn, 22, 2);
        int rm = extract32(insn, 16, 5);
        int rn = extract32(insn, 5, 5);
        int rd = extract32(insn, 0, 5);
        if (opcode == 0x17) {
            if (u || (size == 3 && !is_q)) {
                unallocated_encoding(s);
                return;
            }
        } else {
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
        }
        handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
        break;
    }
    case 0x18 ... 0x31:
        /* floating point ops, sz[1] and U are part of opcode */
        disas_simd_3same_float(s, insn);
        break;
    default:
        disas_simd_3same_int(s, insn);
        break;
    }
}
/*
 * Advanced SIMD three same (ARMv8.2 FP16 variants)
 *
 *  31  30  29  28       24 23  22 21 20  16 15 14 13    11 10  9    5 4    0
 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
 *
 * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
 * (register), FACGE, FABD, FCMGT (register) and FACGT.
 *
 */
static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
{
    int opcode, fpopcode;
    int is_q, u, a, rm, rn, rd;
    int datasize, elements;
    int pass;
    TCGv_ptr fpst;
    bool pairwise = false;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For these floating point ops, the U, a and opcode bits
     * together indicate the operation.
     */
    opcode = extract32(insn, 11, 3);
    u = extract32(insn, 29, 1);
    a = extract32(insn, 23, 1);
    is_q = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    fpopcode = opcode | (a << 3) | (u << 4);
    datasize = is_q ? 128 : 64;
    elements = datasize / 16;

    switch (fpopcode) {
    case 0x10: /* FMAXNMP */
    case 0x12: /* FADDP */
    case 0x16: /* FMAXP */
    case 0x18: /* FMINNMP */
    case 0x1e: /* FMINP */
        pairwise = true;
        break;
    }

    fpst = get_fpstatus_ptr(true);

    if (pairwise) {
        int maxpass = is_q ? 8 : 4;
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res[8];

        for (pass = 0; pass < maxpass; pass++) {
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (pass << 1) & (maxpass - 1);

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (fpopcode) {
            case 0x10: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x12: /* FADDP */
                gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x16: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x1e: /* FMINP */
                gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res[pass]);
        }

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);

    } else {
        for (pass = 0; pass < elements; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);

            switch (fpopcode) {
            case 0x0: /* FMAXNM */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0x2: /* FADD */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3: /* FMULX */
                gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x4: /* FCMEQ */
                gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x6: /* FMAX */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7: /* FRECPS */
                gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x8: /* FMINNM */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x9: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0xa: /* FSUB */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xe: /* FMIN */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FRSQRTS */
                gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x13: /* FMUL */
                gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x14: /* FCMGE */
                gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x15: /* FACGE */
                gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x17: /* FDIV */
                gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FABD */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
                break;
            case 0x1c: /* FCMGT */
                gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1d: /* FACGT */
                gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
                        __func__, insn, fpopcode, s->pc);
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    clear_vec_high(s, is_q, rd);
}
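/* For the FCMLA cases below, opcode<1:0> selects the rotation
 * (0 -> #0, 1 -> #90, 2 -> #180, 3 -> #270); for FCADD a single bit,
 * opcode<1>, selects between #90 and #270.
 */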
/* AdvSIMD three same extra
 *  31   30  29 28       24 23  22  21 20  16 15 14    11  10 9  5 4  0
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 | Q | U | 0 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int feature, rot;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = ARM_FEATURE_V8_RDM;
        break;
    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        if (size == 0
            || (size == 1 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        feature = ARM_FEATURE_V8_FCMA;
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x0: /* SQRDMLAH (vector) */
        switch (size) {
        case 1:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s16);
            break;
        case 2:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s32);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0x1: /* SQRDMLSH (vector) */
        switch (size) {
        case 1:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s16);
            break;
        case 2:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s32);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
        rot = extract32(opcode, 0, 2);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot,
                              gen_helper_gvec_fcmlah);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
                              gen_helper_gvec_fcmlas);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
                              gen_helper_gvec_fcmlad);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        rot = extract32(opcode, 1, 1);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddh);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcadds);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddd);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    default:
        g_assert_not_reached();
    }
}
static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
                                  int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are widening (so each size element
     * in the source becomes a 2*size element in the destination.
     * The only instruction like this is FCVTL.
     */
    int pass;

    if (size == 3) {
        /* 32 -> 64 bit fp conversion */
        TCGv_i64 tcg_res[2];
        int srcelt = is_q ? 2 : 0;

        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
            gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
            tcg_temp_free_i32(tcg_op);
        }
        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    } else {
        /* 16 -> 32 bit fp conversion */
        int srcelt = is_q ? 4 : 0;
        TCGv_i32 tcg_res[4];

        for (pass = 0; pass < 4; pass++) {
            tcg_res[pass] = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
            gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
                                           cpu_env);
        }
        for (pass = 0; pass < 4; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
            tcg_temp_free_i32(tcg_res[pass]);
        }
    }
}
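/* handle_rev() reverses element order within groups of 2^grp_size
 * elements by XORing the lane index: e.g. for REV64 on 16-bit elements
 * grp_size is 2, so lanes 0..3 map to 3..0 within each 64-bit group.
 */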
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz;
    int dsize = is_q ? 128 : 64;
    int i;

    if (opsz >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Special case bytes, use bswap op on each group of elements */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
            tcg_temp_free_i64(tcg_tmp);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        int revmask = (1 << grp_size) - 1;
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_const_i64(0);
        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);

        for (i = 0; i < elements; i++) {
            int e_rev = (i & 0xf) ^ revmask;
            int off = e_rev * esize;
            read_vec_element(s, tcg_rn, rn, i, size);
            if (off >= 64) {
                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
                                    tcg_rn, off - 64, esize);
            } else {
                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
            }
        }
        write_vec_element(s, tcg_rd, rd, 0, MO_64);
        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);

        tcg_temp_free_i64(tcg_rd_hi);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    }
}
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /* Implement the pairwise operations from 2-misc:
     * SADDLP, UADDLP, SADALP, UADALP.
     * These all add pairs of elements in the input to produce a
     * double-width result element in the output (possibly accumulating).
     */
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 op */
        TCGMemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOneOpFn *genfn;
            static NeonGenOneOpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
            tcg_temp_free_i64(tcg_op);
        }
    }
    if (!is_q) {
        tcg_res[1] = tcg_const_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
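/* SHLL/SHLL2 always shift by exactly the source element width: e.g.
 * SHLL V0.8H, V1.8B shifts each zero-extended byte left by 8, hence
 * the fixed "8 << size" shift count in handle_shll() below.
 */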
static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
{
    /* Implement SHLL and SHLL2 */
    int pass;
    int part = is_q ? 2 : 0;
    TCGv_i64 tcg_res[2];

    for (pass = 0; pass < 2; pass++) {
        static NeonGenWidenFn * const widenfns[3] = {
            gen_helper_neon_widen_u8,
            gen_helper_neon_widen_u16,
            tcg_gen_extu_i32_i64,
        };
        NeonGenWidenFn *widenfn = widenfns[size];
        TCGv_i32 tcg_op = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
        tcg_res[pass] = tcg_temp_new_i64();
        widenfn(tcg_res[pass], tcg_op);
        tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);

        tcg_temp_free_i32(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
11178 /* AdvSIMD two reg misc
11179 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
11180 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11181 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
11182 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11184 static void disas_simd_two_reg_misc(DisasContext
*s
, uint32_t insn
)
11186 int size
= extract32(insn
, 22, 2);
11187 int opcode
= extract32(insn
, 12, 5);
11188 bool u
= extract32(insn
, 29, 1);
11189 bool is_q
= extract32(insn
, 30, 1);
11190 int rn
= extract32(insn
, 5, 5);
11191 int rd
= extract32(insn
, 0, 5);
11192 bool need_fpstatus
= false;
11193 bool need_rmode
= false;
11195 TCGv_i32 tcg_rmode
;
11196 TCGv_ptr tcg_fpstatus
;
11199 case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT */
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
        return;
    case 0x4: /* CLS, CLZ */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SADDLP, UADDLP */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_shll(s, is_q, size, rn, rd);
        return;
    case 0xa: /* CMLT */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x3: /* SUQADD, USQADD */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
        return;
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
    {
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        int is_double = extract32(size, 0, 1);
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = is_double ? 3 : 2;
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d) ? true : false;
            int elements = is_double ? 2 : is_q ? 4 : 2;
            if (is_double && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
            return;
        }
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x7f: /* FSQRT */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            need_fpstatus = true;
            need_rmode = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x5c: /* FCVTAU */
        case 0x1c: /* FCVTAS */
            need_fpstatus = true;
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x3c: /* URECPE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x3d: /* FRECPE */
        case 0x7d: /* FRSQRTE */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x16: /* FCVTN, FCVTN2 */
            /* handle_2misc_narrow does a 2*size -> size operation, but these
             * instructions encode the source size rather than dest size.
             */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
            return;
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
            need_rmode = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            /* fall through */
        case 0x59: /* FRINTX */
        case 0x79: /* FRINTI */
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x58: /* FRINTA */
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x7c: /* URSQRTE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            need_fpstatus = true;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }
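
    /* Decode and UNDEF checks are complete at this point; everything
     * below only emits code.  Note how, for the FCVT{N,M,P,Z}* cases
     * above, opcode bits 5 and 0 encode the rounding mode directly:
     *   rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1)
     * yields FPROUNDING_TIEEVEN for FCVTN*, FPROUNDING_POSINF for
     * FCVTP*, FPROUNDING_NEGINF for FCVTM* and FPROUNDING_ZERO for
     * FCVTZ* (the same trick is reused for FRINTN/M/P/Z).
     */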

    if (!fp_access_check(s)) {
        return;
    }

    if (need_fpstatus || need_rmode) {
        tcg_fpstatus = get_fpstatus_ptr(false);
    } else {
        tcg_fpstatus = NULL;
    }
    if (need_rmode) {
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
    }

    switch (opcode) {
    case 0x5:
        if (u && size == 0) { /* NOT */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
            return;
        }
        break;
    case 0xb:
        if (u) { /* NEG */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
            return;
        }
        break;
    }

    if (size == 3) {
        /* All 64-bit element operations can be shared with scalar 2misc */
        int pass;

        for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
                            tcg_rmode, tcg_fpstatus);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op);
        }
    } else {
        int pass;

        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            TCGCond cond;

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0xa: /* CMLT */
                    /* 32 bit integer comparison against zero, result is
                     * test ? (2^32 - 1) : 0. We implement via setcond(test)
                     * and then negating.
                     */
                    cond = TCG_COND_LT;
                do_cmop:
                    tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
                    tcg_gen_neg_i32(tcg_res, tcg_res);
                    break;
                case 0x8: /* CMGT, CMGE */
                    cond = u ? TCG_COND_GE : TCG_COND_GT;
                    goto do_cmop;
                case 0x9: /* CMEQ, CMLE */
                    cond = u ? TCG_COND_LE : TCG_COND_EQ;
                    goto do_cmop;
                case 0x4: /* CLS */
                    if (u) {
                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
                    } else {
                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                    if (u) {
                        gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
                    } else {
                        gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
                    }
                    break;
                case 0xb: /* ABS, NEG */
                    if (u) {
                        tcg_gen_neg_i32(tcg_res, tcg_op);
                    } else {
                        TCGv_i32 tcg_zero = tcg_const_i32(0);
                        tcg_gen_neg_i32(tcg_res, tcg_op);
                        tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
                                            tcg_zero, tcg_op, tcg_res);
                        tcg_temp_free_i32(tcg_zero);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_helper_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_helper_vfp_negs(tcg_res, tcg_op);
                    break;
                case 0x7f: /* FSQRT */
                    gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
                    break;
                case 0x1a: /* FCVTNS */
                case 0x1b: /* FCVTMS */
                case 0x1c: /* FCVTAS */
                case 0x3a: /* FCVTPS */
                case 0x3b: /* FCVTZS */
                {
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_tosls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x5a: /* FCVTNU */
                case 0x5b: /* FCVTMU */
                case 0x5c: /* FCVTAU */
                case 0x7a: /* FCVTPU */
                case 0x7b: /* FCVTZU */
                {
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_touls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x18: /* FRINTN */
                case 0x19: /* FRINTM */
                case 0x38: /* FRINTP */
                case 0x39: /* FRINTZ */
                case 0x58: /* FRINTA */
                case 0x79: /* FRINTI */
                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x59: /* FRINTX */
                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x7c: /* URSQRTE */
                    gen_helper_rsqrte_u32(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* For these two insns size is part of the opcode specifier
                     * (handled earlier); they always operate on byte elements.
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                {
                    NeonGenOneOpEnvFn *genfn;
                    static NeonGenOneOpEnvFn * const fns[2][2] = {
                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                    };
                    genfn = fns[size][u];
                    genfn(tcg_res, cpu_env, tcg_op);
                    break;
                }
                case 0x8: /* CMGT, CMGE */
                case 0x9: /* CMEQ, CMLE */
                case 0xa: /* CMLT */
                {
                    static NeonGenTwoOpFn * const fns[3][2] = {
                        { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
                        { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
                        { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
                    };
                    NeonGenTwoOpFn *genfn;
                    int comp;
                    bool reverse;
                    TCGv_i32 tcg_zero = tcg_const_i32(0);

                    /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
                    comp = (opcode - 0x8) * 2 + u;
                    /* ...but LE, LT are implemented as reverse GE, GT */
                    reverse = (comp > 2);
                    if (reverse) {
                        comp = 4 - comp;
                    }
                    genfn = fns[comp][size];
                    if (reverse) {
                        genfn(tcg_res, tcg_zero, tcg_op);
                    } else {
                        genfn(tcg_res, tcg_op, tcg_zero);
                    }
                    tcg_temp_free_i32(tcg_zero);
                    break;
                }
                case 0xb: /* ABS, NEG */
                    if (u) {
                        TCGv_i32 tcg_zero = tcg_const_i32(0);
                        if (size) {
                            gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
                        } else {
                            gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
                        }
                        tcg_temp_free_i32(tcg_zero);
                    } else {
                        if (size) {
                            gen_helper_neon_abs_s16(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_abs_s8(tcg_res, tcg_op);
                        }
                    }
                    break;
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }
    }
    clear_vec_high(s, is_q, rd);

    if (need_rmode) {
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }
    if (need_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
/* AdvSIMD [scalar] two register miscellaneous (FP16)
 *
 *   31  30  29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 *   mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
 *   val:  0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
 *
 * This actually covers two groups where scalar access is governed by
 * bit 28. A bunch of the instructions (float to integral) only exist
 * in the vector form and are un-allocated for the scalar decode. Also
 * in the scalar decode Q is always 1.
 */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
{
    int fpop, opcode, a, u;
    int rn, rd;
    bool is_q;
    bool is_scalar;
    bool only_in_vector = false;

    int pass;
    TCGv_i32 tcg_rmode = NULL;
    TCGv_ptr tcg_fpstatus = NULL;
    bool need_rmode = false;
    bool need_fpst = true;
    int rmode;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
        unallocated_encoding(s);
        return;
    }

    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    a = extract32(insn, 23, 1);
    u = extract32(insn, 29, 1);
    is_scalar = extract32(insn, 28, 1);
    is_q = extract32(insn, 30, 1);

    opcode = extract32(insn, 12, 5);
    fpop = deposit32(opcode, 5, 1, a);
    fpop = deposit32(fpop, 6, 1, u);

    switch (fpop) {
    case 0x1d: /* SCVTF */
    case 0x5d: /* UCVTF */
    {
        int elements;

        if (is_scalar) {
            elements = 1;
        } else {
            elements = (is_q ? 8 : 4);
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
        return;
    }
    case 0x2c: /* FCMGT (zero) */
    case 0x2d: /* FCMEQ (zero) */
    case 0x2e: /* FCMLT (zero) */
    case 0x6c: /* FCMGE (zero) */
    case 0x6d: /* FCMLE (zero) */
        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
        return;
    case 0x3d: /* FRECPE */
    case 0x3f: /* FRECPX */
        break;
    case 0x18: /* FRINTN */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x19: /* FRINTM */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x38: /* FRINTP */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x39: /* FRINTZ */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x58: /* FRINTA */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x59: /* FRINTX */
    case 0x79: /* FRINTI */
        only_in_vector = true;
        /* current rounding mode */
        break;
    case 0x1a: /* FCVTNS */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x1b: /* FCVTMS */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x1c: /* FCVTAS */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x3a: /* FCVTPS */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x3b: /* FCVTZS */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x5a: /* FCVTNU */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x5b: /* FCVTMU */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x5c: /* FCVTAU */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x7a: /* FCVTPU */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x7b: /* FCVTZU */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x2f: /* FABS */
    case 0x6f: /* FNEG */
        need_fpst = false;
        break;
    case 0x7d: /* FRSQRTE */
    case 0x7f: /* FSQRT (vector) */
        break;
    default:
        fprintf(stderr, "%s: insn %#04x fpop %#2x\n", __func__, insn, fpop);
        g_assert_not_reached();
    }

    /* Check additional constraints for the scalar encoding */
    if (is_scalar) {
        if (!is_q) {
            unallocated_encoding(s);
            return;
        }
        /* FRINTxx is only in the vector form */
        if (only_in_vector) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_rmode || need_fpst) {
        tcg_fpstatus = get_fpstatus_ptr(true);
    }

    if (need_rmode) {
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    }

    if (is_scalar) {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op, rn, 0, MO_16);

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
    } else {
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    if (tcg_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
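
/* Note that the half-precision FABS and FNEG cases above need no helper
 * calls at all: bit 15 of an IEEE binary16 value is its sign bit, so
 * FABS is simply "andi 0x7fff" and FNEG "xori 0x8000" on the 16-bit
 * element.
 */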

/* AdvSIMD scalar x indexed element
 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * AdvSIMD vector x indexed element
 *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 */
static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
    /* This encoding has two kinds of instruction:
     *  normal, where we perform elt x idxelt => elt for each
     *     element in the vector
     *  long, where we perform elt x idxelt and generate a result of
     *     double the width of the input element
     * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
     */
    bool is_scalar = extract32(insn, 28, 1);
    bool is_q = extract32(insn, 30, 1);
    bool u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int l = extract32(insn, 21, 1);
    int m = extract32(insn, 20, 1);
    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
    int rm = extract32(insn, 16, 4);
    int opcode = extract32(insn, 12, 4);
    int h = extract32(insn, 11, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool is_long = false;
    int is_fp = 0;
    bool is_fp16 = false;
    int index;
    TCGv_ptr fpst;

    switch (16 * u + opcode) {
    case 0x08: /* MUL */
    case 0x10: /* MLA */
    case 0x14: /* MLS */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x02: /* SMLAL, SMLAL2 */
    case 0x12: /* UMLAL, UMLAL2 */
    case 0x06: /* SMLSL, SMLSL2 */
    case 0x16: /* UMLSL, UMLSL2 */
    case 0x0a: /* SMULL, SMULL2 */
    case 0x1a: /* UMULL, UMULL2 */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        is_long = true;
        break;
    case 0x03: /* SQDMLAL, SQDMLAL2 */
    case 0x07: /* SQDMLSL, SQDMLSL2 */
    case 0x0b: /* SQDMULL, SQDMULL2 */
        is_long = true;
        break;
    case 0x0c: /* SQDMULH */
    case 0x0d: /* SQRDMULH */
        break;
    case 0x01: /* FMLA */
    case 0x05: /* FMLS */
    case 0x09: /* FMUL */
    case 0x19: /* FMULX */
        is_fp = 1;
        break;
    case 0x1d: /* SQRDMLAH */
    case 0x1f: /* SQRDMLSH */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
            unallocated_encoding(s);
            return;
        }
        is_fp = 2;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    switch (is_fp) {
    case 1: /* normal fp */
        /* convert insn encoded size to TCGMemOp size */
        switch (size) {
        case 0: /* half-precision */
            size = MO_16;
            is_fp16 = true;
            break;
        case MO_32: /* single precision */
        case MO_64: /* double precision */
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    case 2: /* complex fp */
        /* Each indexable element is a complex pair.  */
        size <<= 1;
        switch (size) {
        case MO_32:
            if (h && !is_q) {
                unallocated_encoding(s);
                return;
            }
            is_fp16 = true;
            break;
        case MO_64:
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    default: /* integer */
        switch (size) {
        case MO_8:
        case MO_64:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    if (is_fp16 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
        unallocated_encoding(s);
        return;
    }

    /* Given TCGMemOp size, adjust register and indexing.  */
    switch (size) {
    case MO_16:
        index = h << 2 | l << 1 | m;
        break;
    case MO_32:
        index = h << 1 | l;
        rm |= m << 4;
        break;
    case MO_64:
        if (l || !is_q) {
            unallocated_encoding(s);
            return;
        }
        index = h;
        rm |= m << 4;
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fp) {
        fpst = get_fpstatus_ptr(is_fp16);
    } else {
        fpst = NULL;
    }

    switch (16 * u + opcode) {
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_reg_offset(s, rm, index, size), fpst,
                           is_q ? 16 : 8, vec_full_reg_size(s),
                           extract32(insn, 13, 2), /* rot */
                           size == MO_64
                           ? gen_helper_gvec_fcmlas_idx
                           : gen_helper_gvec_fcmlah_idx);
        tcg_temp_free_ptr(fpst);
        return;
    }
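
    /* Everything other than FCMLA expands in one of three shapes below:
     * 64-bit elements (FP only), 16- or 32-bit elements for the
     * non-widening ops, and finally the "long" ops which widen
     * 16x16->32 or 32x32->64.
     */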
    if (size == 3) {
        TCGv_i64 tcg_idx = tcg_temp_new_i64();
        int pass;

        assert(is_fp && is_q && !is_long);

        read_vec_element(s, tcg_idx, rm, index, MO_64);

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            switch (16 * u + opcode) {
            case 0x05: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op, tcg_op);
                /* fall through */
            case 0x01: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
                break;
            case 0x09: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            case 0x19: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
            tcg_temp_free_i64(tcg_res);
        }

        tcg_temp_free_i64(tcg_idx);
        clear_vec_high(s, !is_scalar, rd);
    } else if (!is_long) {
        /* 32 bit floating point, or 16 or 32 bit integer.
         * For the 16 bit scalar case we use the usual Neon helpers and
         * rely on the fact that 0 op 0 == 0 with no side effects.
         */
        TCGv_i32 tcg_idx = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        read_vec_element_i32(s, tcg_idx, rm, index, size);

        if (size == 1 && !is_scalar) {
            /* The simplest way to handle the 16x16 indexed ops is to duplicate
             * the index into both halves of the 32 bit tcg_idx and then use
             * the usual Neon helpers.
             */
            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
        }

        for (pass = 0; pass < maxpasses; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);

            switch (16 * u + opcode) {
            case 0x08: /* MUL */
            case 0x10: /* MLA */
            case 0x14: /* MLS */
            {
                static NeonGenTwoOpFn * const fns[2][2] = {
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                NeonGenTwoOpFn *genfn;
                bool is_sub = opcode == 0x4;

                if (size == 1) {
                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
                } else {
                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
                }
                if (opcode == 0x8) {
                    break;
                }
                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
                genfn = fns[size - 1][is_sub];
                genfn(tcg_res, tcg_op, tcg_res);
                break;
            }
            case 0x05: /* FMLS */
            case 0x01: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                switch (size) {
                case 1:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for fused
                         * multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
                    }
                    if (is_scalar) {
                        gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
                                                   tcg_res, fpst);
                    } else {
                        gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
                                                    tcg_res, fpst);
                    }
                    break;
                case 2:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for
                         * fused multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
                    }
                    gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
                                           tcg_res, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x09: /* FMUL */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulh(tcg_res, tcg_op,
                                                tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mul2h(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x19: /* FMULX */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulxh(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
                                                  tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x0c: /* SQDMULH */
                if (size == 1) {
                    gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                }
                break;
            case 0x0d: /* SQRDMULH */
                if (size == 1) {
                    gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                }
                break;
            case 0x1d: /* SQRDMLAH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            case 0x1f: /* SQRDMLSH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
            tcg_temp_free_i32(tcg_res);
        }

        tcg_temp_free_i32(tcg_idx);
        clear_vec_high(s, is_q, rd);
    } else {
        /* long ops: 16x16->32 or 32x32->64 */
        TCGv_i64 tcg_res[2];
        int pass;
        bool satop = extract32(opcode, 0, 1);
        TCGMemOp memop = MO_32;

        if (satop || !u) {
            memop |= MO_SIGN;
        }

        if (size == 2) {
            TCGv_i64 tcg_idx = tcg_temp_new_i64();

            read_vec_element(s, tcg_idx, rm, index, memop);

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i64 tcg_op = tcg_temp_new_i64();
                TCGv_i64 tcg_passres;
                int passelt;

                if (is_scalar) {
                    passelt = 0;
                } else {
                    passelt = pass + (is_q * 2);
                }

                read_vec_element(s, tcg_op, rn, passelt, memop);

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
                tcg_temp_free_i64(tcg_op);

                if (satop) {
                    /* saturating, doubling */
                    gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i64(tcg_idx);

            clear_vec_high(s, !is_scalar, rd);
        } else {
            TCGv_i32 tcg_idx = tcg_temp_new_i32();

            assert(size == 1);
            read_vec_element_i32(s, tcg_idx, rm, index, size);

            if (!is_scalar) {
                /* The simplest way to handle the 16x16 indexed ops is to
                 * duplicate the index into both halves of the 32 bit tcg_idx
                 * and then use the usual Neon helpers.
                 */
                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
            }

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i32 tcg_op = tcg_temp_new_i32();
                TCGv_i64 tcg_passres;

                if (is_scalar) {
                    read_vec_element_i32(s, tcg_op, rn, pass, size);
                } else {
                    read_vec_element_i32(s, tcg_op, rn,
                                         pass + (is_q * 2), MO_32);
                }

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                if (memop & MO_SIGN) {
                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
                }
                if (satop) {
                    gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }
                tcg_temp_free_i32(tcg_op);

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i32(tcg_idx);

            if (is_scalar) {
                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
            }
        }

        if (is_scalar) {
            tcg_res[1] = tcg_const_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
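
/* A worked example of the H:L:M index decode above: for a 32-bit
 * element the index is H:L (0..3) and M becomes bit 4 of Rm, so
 * e.g. FMLA V0.4S, V1.4S, V2.S[3] has H = L = 1, while M lets the
 * 4-bit Rm field still address all 32 vector registers.
 */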

/* Crypto AES
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_aes(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int decrypt;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
    TCGv_i32 tcg_decrypt;
    CryptoThreeOpIntFn *genfn;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
        || size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x4: /* AESE */
        decrypt = 0;
        genfn = gen_helper_crypto_aese;
        break;
    case 0x6: /* AESMC */
        decrypt = 0;
        genfn = gen_helper_crypto_aesmc;
        break;
    case 0x5: /* AESD */
        decrypt = 1;
        genfn = gen_helper_crypto_aese;
        break;
    case 0x7: /* AESIMC */
        decrypt = 1;
        genfn = gen_helper_crypto_aesmc;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_decrypt = tcg_const_i32(decrypt);

    genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_decrypt);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_i32(tcg_decrypt);
}
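
/* AESD and AESIMC reuse the AESE/AESMC helpers: the tcg_decrypt flag
 * passed as the third operand selects the forward or inverse AES
 * transformation inside the helper.
 */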

/* Crypto three-reg SHA
 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
 * +-----------------+------+---+------+---+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
 * +-----------------+------+---+------+---+--------+-----+------+------+
 */
static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 3);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    CryptoThreeOpFn *genfn;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
    int feature = ARM_FEATURE_V8_SHA256;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1C */
    case 1: /* SHA1P */
    case 2: /* SHA1M */
    case 3: /* SHA1SU0 */
        genfn = NULL;
        feature = ARM_FEATURE_V8_SHA1;
        break;
    case 4: /* SHA256H */
        genfn = gen_helper_crypto_sha256h;
        break;
    case 5: /* SHA256H2 */
        genfn = gen_helper_crypto_sha256h2;
        break;
    case 6: /* SHA256SU1 */
        genfn = gen_helper_crypto_sha256su1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_rm_ptr = vec_full_reg_ptr(s, rm);

    if (genfn) {
        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);
    } else {
        TCGv_i32 tcg_opcode = tcg_const_i32(opcode);

        gen_helper_crypto_sha1_3reg(tcg_rd_ptr, tcg_rn_ptr,
                                    tcg_rm_ptr, tcg_opcode);
        tcg_temp_free_i32(tcg_opcode);
    }

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_ptr(tcg_rm_ptr);
}
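
/* The SHA1 three-reg ops deliberately leave genfn NULL and funnel
 * through gen_helper_crypto_sha1_3reg with the opcode as a selector,
 * which is why only the SHA256 cases assign a CryptoThreeOpFn above.
 */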

/* Crypto two-reg SHA
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    CryptoTwoOpFn *genfn;
    int feature;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1H */
        feature = ARM_FEATURE_V8_SHA1;
        genfn = gen_helper_crypto_sha1h;
        break;
    case 1: /* SHA1SU1 */
        feature = ARM_FEATURE_V8_SHA1;
        genfn = gen_helper_crypto_sha1su1;
        break;
    case 2: /* SHA256SU0 */
        feature = ARM_FEATURE_V8_SHA256;
        genfn = gen_helper_crypto_sha256su0;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);

    genfn(tcg_rd_ptr, tcg_rn_ptr);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
}

/* Crypto three-reg SHA512
 *  31                   21 20  16 15  14  13 12  11 10  9    5 4    0
 * +-----------------------+------+---+---+-----+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+---+---+-----+--------+------+------+
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int feature;
    CryptoThreeOpFn *genfn;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = ARM_FEATURE_V8_SHA3;
            genfn = NULL;
            break;
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = ARM_FEATURE_V8_SM3;
            genfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = ARM_FEATURE_V8_SM3;
            genfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = ARM_FEATURE_V8_SM4;
            genfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (genfn) {
        TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;

        tcg_rd_ptr = vec_full_reg_ptr(s, rd);
        tcg_rn_ptr = vec_full_reg_ptr(s, rn);
        tcg_rm_ptr = vec_full_reg_ptr(s, rm);

        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);

        tcg_temp_free_ptr(tcg_rd_ptr);
        tcg_temp_free_ptr(tcg_rn_ptr);
        tcg_temp_free_ptr(tcg_rm_ptr);
    } else {
        /* RAX1 */
        TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            tcg_gen_rotli_i64(tcg_res[pass], tcg_op2, 1);
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    }
}
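
/* RAX1 needs no helper because it is only two TCG ops per 64-bit lane:
 * Vd.2D = Vn.2D ^ ROL(Vm.2D, 1), generated above as rotli-by-1 then xor.
 */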

/* Crypto two-reg SHA512
 *  31                                     12  11  10  9    5 4    0
 * +-----------------------------------------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------------------------+--------+------+------+
 */
static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
    int feature;
    CryptoTwoOpFn *genfn;

    switch (opcode) {
    case 0: /* SHA512SU0 */
        feature = ARM_FEATURE_V8_SHA512;
        genfn = gen_helper_crypto_sha512su0;
        break;
    case 1: /* SM4E */
        feature = ARM_FEATURE_V8_SM4;
        genfn = gen_helper_crypto_sm4e;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);

    genfn(tcg_rd_ptr, tcg_rn_ptr);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
}

/* Crypto four-register
 *  31               23 22 21 20  16 15  14  10 9    5 4    0
 * +-------------------+-----+------+---+------+------+------+
 * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
 * +-------------------+-----+------+---+------+------+------+
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = ARM_FEATURE_V8_SHA3;
        break;
    case 2: /* SM3SS1 */
        feature = ARM_FEATURE_V8_SM3;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3 */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_const_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
    }
}
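
/* For reference, the semantics implemented above are:
 *   EOR3:   Vd.16B = Vn.16B ^ Vm.16B ^ Va.16B
 *   BCAX:   Vd.16B = Vn.16B ^ (Vm.16B & ~Va.16B)
 *   SM3SS1: Vd.S[3] = ROL(ROL(Vn.S[3], 12) + Vm.S[3] + Va.S[3], 7),
 *           with the low three words of Vd cleared to zero.
 */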

/* Crypto XAR
 *  31                   21 20  16 15    10 9    5 4    0
 * +-----------------------+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
 * +-----------------------+------+--------+------+------+
 */
static void disas_crypto_xar(DisasContext *s, uint32_t insn)
{
    int rm = extract32(insn, 16, 5);
    int imm6 = extract32(insn, 10, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    for (pass = 0; pass < 2; pass++) {
        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
        tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
    }
    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
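
/* XAR likewise needs no helper: each 64-bit lane computes
 * Vd = ROR(Vn ^ Vm, imm6), i.e. the xor/rotri pair in the loop above.
 */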

/* Crypto three-reg imm2
 *  31                   21 20  16 15  14 13 12  11  10  9    5 4    0
 * +-----------------------+------+-----+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+-----+------+--------+------+------+
 */
static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int imm2 = extract32(insn, 12, 2);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
    TCGv_i32 tcg_imm2, tcg_opcode;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_rm_ptr = vec_full_reg_ptr(s, rm);
    tcg_imm2 = tcg_const_i32(imm2);
    tcg_opcode = tcg_const_i32(opcode);

    gen_helper_crypto_sm3tt(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr, tcg_imm2,
                            tcg_opcode);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_ptr(tcg_rm_ptr);
    tcg_temp_free_i32(tcg_imm2);
    tcg_temp_free_i32(tcg_opcode);
}

/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL }
};
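
/* The table is scanned in order and the first entry whose masked insn
 * matches wins, which is why simd_mod_imm must precede simd_shift_imm
 * above.  lookup_disas_fn(), defined earlier in this file, is
 * essentially:
 *
 *     while (tptr->mask) {
 *         if ((insn & tptr->mask) == tptr->pattern) {
 *             return tptr->disas_fn;
 *         }
 *         tptr++;
 *     }
 *     return NULL;
 */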

static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}
13180 static void disas_a64_insn(CPUARMState
*env
, DisasContext
*s
)
13184 insn
= arm_ldl_code(env
, s
->pc
, s
->sctlr_b
);
13188 s
->fp_access_checked
= false;
13190 switch (extract32(insn
, 25, 4)) {
13191 case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
13192 unallocated_encoding(s
);
13194 case 0x8: case 0x9: /* Data processing - immediate */
13195 disas_data_proc_imm(s
, insn
);
13197 case 0xa: case 0xb: /* Branch, exception generation and system insns */
13198 disas_b_exc_sys(s
, insn
);
13203 case 0xe: /* Loads and stores */
13204 disas_ldst(s
, insn
);
13207 case 0xd: /* Data processing - register */
13208 disas_data_proc_reg(s
, insn
);
13211 case 0xf: /* Data processing - SIMD and floating point */
13212 disas_data_proc_simd_fp(s
, insn
);
13215 assert(FALSE
); /* all 15 cases should be handled above */
13219 /* if we allocated any temporaries, free them here */
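
/* As a concrete example of the dispatch above: the instruction word
 * 0x91000400 (ADD X0, X0, #1) has bits [28:25] == 0x8 and is therefore
 * routed to disas_data_proc_imm().
 */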

static int aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                         CPUState *cpu, int max_insns)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = arm_env_get_cpu(env);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = 0;
    dc->sctlr_b = 0;
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->tbi0 = ARM_TBFLAG_TBI0(dc->base.tb->flags);
    dc->tbi1 = ARM_TBFLAG_TBI1(dc->base.tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->sve_excp_el = ARM_TBFLAG_SVEEXC_EL(dc->base.tb->flags);
    dc->sve_len = (ARM_TBFLAG_ZCR_LEN(dc->base.tb->flags) + 1) * 16;
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);

    /* Bound the number of insns to execute to those left on the page.  */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;

    /* If architectural single step active, limit to 1.  */
    if (dc->ss_active) {
        bound = 1;
    }
    max_insns = MIN(max_insns, bound);

    init_tmp_a64_array(dc);

    return max_insns;
}
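
/* The page-bound arithmetic above relies on TARGET_PAGE_MASK being a
 * sign-extended mask: with 4K pages and a pc_first ending in 0xff0,
 * pc_first | TARGET_PAGE_MASK == -16, so bound == 16 / 4 == 4
 * instructions remaining before the page boundary.
 */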

static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
    tcg_clear_temp_count();
}

static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc, 0, 0);
    dc->insn_start = tcg_last_op();
}

static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                        const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_a64_set_pc_im(dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it likely won't be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        dc->pc += 4;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}

static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        disas_a64_insn(env, dc);
    }

    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}
*dcbase
, CPUState
*cpu
)
13361 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
13363 if (unlikely(dc
->base
.singlestep_enabled
|| dc
->ss_active
)) {
13364 /* Note that this means single stepping WFI doesn't halt the CPU.
13365 * For conditional branch insns this is harmless unreachable code as
13366 * gen_goto_tb() has already handled emitting the debug exception
13367 * (and thus a tb-jump is not possible when singlestepping).
13369 switch (dc
->base
.is_jmp
) {
13371 gen_a64_set_pc_im(dc
->pc
);
13375 if (dc
->base
.singlestep_enabled
) {
13376 gen_exception_internal(EXCP_DEBUG
);
13378 gen_step_complete_exception(dc
);
13381 case DISAS_NORETURN
:
13385 switch (dc
->base
.is_jmp
) {
13387 case DISAS_TOO_MANY
:
13388 gen_goto_tb(dc
, 1, dc
->pc
);
13392 gen_a64_set_pc_im(dc
->pc
);
13395 tcg_gen_exit_tb(0);
13398 tcg_gen_lookup_and_goto_ptr();
13400 case DISAS_NORETURN
:
13404 gen_a64_set_pc_im(dc
->pc
);
13405 gen_helper_wfe(cpu_env
);
13408 gen_a64_set_pc_im(dc
->pc
);
13409 gen_helper_yield(cpu_env
);
13413 /* This is a special case because we don't want to just halt the CPU
13414 * if trying to debug across a WFI.
13416 TCGv_i32 tmp
= tcg_const_i32(4);
13418 gen_a64_set_pc_im(dc
->pc
);
13419 gen_helper_wfi(cpu_env
, tmp
);
13420 tcg_temp_free_i32(tmp
);
13421 /* The helper doesn't necessarily throw an exception, but we
13422 * must go back to the main loop to check for interrupts anyway.
13424 tcg_gen_exit_tb(0);
13430 /* Functions above can change dc->pc, so re-align db->pc_next */
13431 dc
->base
.pc_next
= dc
->pc
;

static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .breakpoint_check   = aarch64_tr_breakpoint_check,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};