/*
 * AArch64 translation
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "exec/exec-all.h"
#include "translate.h"
#include "translate-a64.h"
#include "qemu/log.h"
#include "disas/disas.h"
#include "arm_ldst.h"
#include "semihosting/semihost.h"
#include "cpregs.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
/*
 * Helpers for extracting complex instruction fields
 */

/*
 * For load/store with an unsigned 12 bit immediate scaled by the element
 * size. The input has the immediate field in bits [14:3] and the element
 * size in [2:0].
 */
static int uimm_scaled(DisasContext *s, int x)
{
    unsigned imm = x >> 3;
    unsigned scale = extract32(x, 0, 3);
    return imm << scale;
}
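/*
 * For example, a doubleword access (scale == 3) with an immediate field
 * of 2 yields a byte offset of 2 << 3 == 16.
 */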
/* For load/store memory tags: scale offset by LOG2_TAG_GRANULE */
static int scale_by_log2_tag_granule(DisasContext *s, int x)
{
    return x << LOG2_TAG_GRANULE;
}
/*
 * Include the generated decoders.
 */

#include "decode-sme-fa64.c.inc"
#include "decode-a64.c.inc"

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
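/*
 * A hypothetical entry, for illustration only: each row pairs a pattern
 * and a mask with the handler to call when (insn & mask) == pattern,
 * and the table ends with a zero mask:
 *
 *   static const AArch64DecodeTable example_table[] = {
 *       { 0x0e200400, 0x9f200400, disas_some_simd_group },
 *       { 0x00000000, 0x00000000, NULL }
 *   };
 *
 * See lookup_disas_fn() below for the matching loop.
 */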
/* initialize TCG globals.  */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(tcg_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(tcg_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(tcg_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
/*
 * Return the core mmu_idx to use for A64 load/store insns which
 * have an "unprivileged load/store" variant. Those insns access
 * EL0 if executed from an EL which has control over EL0 (usually
 * EL1) but behave like normal loads and stores if executed from
 * elsewhere (eg EL3).
 *
 * @unpriv : true for the unprivileged encoding; false for the
 *           normal encoding (in which case we will return the same
 *           thing as get_mem_index()).
 */
static int get_a64_user_mem_index(DisasContext *s, bool unpriv)
{
    /*
     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
     * which is the usual mmu_idx for this cpu state.
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (unpriv && s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}
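/*
 * For example, an LDTR executed at EL1 in the E10 regime is translated
 * with ARMMMUIdx_E10_0, i.e. with EL0 permissions.
 */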
static void set_btype_raw(int val)
{
    tcg_gen_st_i32(tcg_constant_i32(val), tcg_env,
                   offsetof(CPUARMState, btype));
}

static void set_btype(DisasContext *s, int val)
{
    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype.  */
    tcg_debug_assert(val >= 1 && val <= 3);
    set_btype_raw(val);
    s->btype = -1;
}

static void reset_btype(DisasContext *s)
{
    if (s->btype != 0) {
        set_btype_raw(0);
        s->btype = 0;
    }
}
static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
    } else {
        tcg_gen_movi_i64(dest, s->pc_curr + diff);
    }
}

void gen_a64_update_pc(DisasContext *s, target_long diff)
{
    gen_pc_plus_diff(s, cpu_pc, diff);
    s->pc_save = s->pc_curr + diff;
}
/*
 * Handle Top Byte Ignore (TBI) bits.
 *
 * If address tagging is enabled via the TCR TBI bits:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * Here we have concatenated TBI{1,0} into tbi.
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55.  */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        switch (tbi) {
        case 1:
            /* tbi0 but !tbi1: only use the extension if positive */
            tcg_gen_and_i64(dst, dst, src);
            break;
        case 2:
            /* !tbi0 but tbi1: only use the extension if negative */
            tcg_gen_or_i64(dst, dst, src);
            break;
        case 3:
            /* tbi0 and tbi1: always use the extension */
            break;
        default:
            g_assert_not_reached();
        }
    }
}
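/*
 * The AND/OR trick works because the sign-extended copy has bits [63:56]
 * all-zero when bit 55 is clear and all-one when it is set: ANDing with
 * the original applies the extension only to bit-55-clear addresses,
 * ORing applies it only to bit-55-set ones; otherwise the original top
 * byte passes through unchanged.
 */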
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI bits,
     * then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
    s->pc_save = -1;
}
/*
 * Handle MTE and/or TBI.
 *
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode we do not have a TLB with which to implement this, so we must
 * remove the top byte now.
 *
 * Always return a fresh temporary that we can increment independently
 * of the write-back address.
 */

TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = tcg_temp_new_i64();
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}

/* Insert a zero tag into src, with the result at dst. */
static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
}

static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
                             MMUAccessType acc, int log2_size)
{
    gen_helper_probe_access(tcg_env, ptr,
                            tcg_constant_i32(acc),
                            tcg_constant_i32(get_mem_index(s)),
                            tcg_constant_i32(1 << log2_size));
}
/*
 * For MTE, check a single logical or atomic access.  This probes a single
 * address, the exact one specified.  The size and alignment of the access
 * is not relevant to MTE, per se, but watchpoints do require the size,
 * and we want to recognize those before making any other changes to state.
 */
static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
                                      bool is_write, bool tag_checked,
                                      MemOp memop, bool is_unpriv,
                                      int core_idx)
{
    if (tag_checked && s->mte_active[is_unpriv]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop));
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, tcg_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
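/*
 * The descriptor packs everything the out-of-line helper needs into one
 * 32-bit word: e.g. a tag-checked 8-byte write at core mmu index 2 with
 * TBI and TCMA clear encodes MIDX = 2, WRITE = 1 and SIZEM1 = 7 in the
 * corresponding MTEDESC fields.
 */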
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, MemOp memop)
{
    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, memop,
                                 false, get_mem_index(s));
}
/*
 * For MTE, check multiple logical sequential accesses.
 */
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int total_size, MemOp single_mop)
{
    if (tag_checked && s->mte_active[0]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop));
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, tcg_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
/*
 * Generate the special alignment check that applies to AccType_ATOMIC
 * and AccType_ORDERED insns under FEAT_LSE2: the access need not be
 * naturally aligned, but it must not cross a 16-byte boundary.
 * See AArch64.CheckAlignment().
 */
static void check_lse2_align(DisasContext *s, int rn, int imm,
                             bool is_write, MemOp mop)
{
    TCGv_i32 tmp;
    TCGv_i64 addr;
    TCGLabel *over_label;
    MMUAccessType type;
    int mmu_idx;

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg_sp(s, rn));
    tcg_gen_addi_i32(tmp, tmp, imm & 15);
    tcg_gen_andi_i32(tmp, tmp, 15);
    tcg_gen_addi_i32(tmp, tmp, memop_size(mop));

    over_label = gen_new_label();
    tcg_gen_brcondi_i32(TCG_COND_LEU, tmp, 16, over_label);

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm);

    type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
    mmu_idx = get_mem_index(s);
    gen_helper_unaligned_access(tcg_env, addr, tcg_constant_i32(type),
                                tcg_constant_i32(mmu_idx));

    gen_set_label(over_label);
}
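/*
 * E.g. an 8-byte access whose address is 12 mod 16 gives
 * tmp = 12 + 8 = 20 > 16, so the branch is not taken and the helper
 * raises the alignment fault required by AArch64.CheckAlignment().
 */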
/* Handle the alignment check for AccType_ATOMIC instructions. */
static MemOp check_atomic_align(DisasContext *s, int rn, MemOp mop)
{
    MemOp size = mop & MO_SIZE;

    if (size == MO_8) {
        return mop;
    }

    /*
     * If size == MO_128, this is a LDXP, and the operation is single-copy
     * atomic for each doubleword, not the entire quadword; it still must
     * be quadword aligned.
     */
    if (size == MO_128) {
        return finalize_memop_atom(s, MO_128 | MO_ALIGN,
                                   MO_ATOM_IFALIGN_PAIR);
    }
    if (dc_isar_feature(aa64_lse2, s)) {
        check_lse2_align(s, rn, 0, true, mop);
    } else {
        mop |= MO_ALIGN;
    }
    return finalize_memop(s, mop);
}
/* Handle the alignment check for AccType_ORDERED instructions. */
static MemOp check_ordered_align(DisasContext *s, int rn, int imm,
                                 bool is_write, MemOp mop)
{
    MemOp size = mop & MO_SIZE;

    if (size == MO_8) {
        return mop;
    }
    if (size == MO_128) {
        return finalize_memop_atom(s, MO_128 | MO_ALIGN,
                                   MO_ATOM_IFALIGN_PAIR);
    }
    if (!dc_isar_feature(aa64_lse2, s)) {
        mop |= MO_ALIGN;
    } else if (!s->naa) {
        check_lse2_align(s, rn, imm, is_write, mop);
    }
    return finalize_memop(s, mop);
}
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /*
     * Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly.  The NE/EQ comparisons are also fine with this choice.
     */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);
}
static void gen_rebuild_hflags(DisasContext *s)
{
    gen_helper_rebuild_hflags_a64(tcg_env, tcg_constant_i32(s->current_el));
}

static void gen_exception_internal(int excp)
{
    assert(excp_is_internal(excp));
    gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
}

static void gen_exception_internal_insn(DisasContext *s, int excp)
{
    gen_a64_update_pc(s, 0);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    gen_a64_update_pc(s, 0);
    gen_helper_exception_bkpt_insn(tcg_env, tcg_constant_i32(syndrome));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}
static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (s->ss_active) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
{
    if (use_goto_tb(s, s->pc_curr + diff)) {
        /*
         * For pcrel, the pc must always be up-to-date on entry to
         * the linked TB, so that it can use simple additions for all
         * further adjustments.  For !pcrel, the linked TB is compiled
         * to know its full virtual address, so we can delay the
         * update to pc to the unlinked path.  A long chain of links
         * can thus avoid many updates to the PC.
         */
        if (tb_cflags(s->base.tb) & CF_PCREL) {
            gen_a64_update_pc(s, diff);
            tcg_gen_goto_tb(n);
        } else {
            tcg_gen_goto_tb(n);
            gen_a64_update_pc(s, diff);
        }
        tcg_gen_exit_tb(s->base.tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_update_pc(s, diff);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}
/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_movi_i64(t, 0);
        return t;
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}
/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}

/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, tcg_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, tcg_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, tcg_env, fp_reg_offset(s, reg, MO_16));
    return v;
}
/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop move, with side effect of clearing the tail. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}

void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, tcg_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
}
/* Expand a 2-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 4-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand operation using an out-of-line helper.  */
static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int data, gen_helper_gvec_2 *fn)
{
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand operation using an out-of-line helper.  */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
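/*
 * A separate fp_status is selected for FP16 because FPCR.FZ16, not
 * FPCR.FZ, controls flush-to-zero for half-precision operations.
 */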
/* Expand a 3-operand + qc operation using an out-of-line helper.  */
static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
                            int rm, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr qc_ptr = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(qc_ptr, tcg_env, offsetof(CPUARMState, vfp.qc));
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), qc_ptr,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
}

/* Expand a 4-operand operation using an out-of-line helper.  */
static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
                             int rm, int ra, int data, gen_helper_gvec_4 *fn)
{
    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/*
 * Expand a 4-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, int ra, bool is_fp16, int data,
                              gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
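/*
 * After the extract, cpu_NF holds the high half (bit 31 is the result's
 * sign bit) and cpu_ZF holds low | high, which is non-zero exactly when
 * the 64-bit result is non-zero -- matching the convention that Z is
 * set iff cpu_ZF == 0.
 */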
/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 result, flag, tmp;
    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    tcg_gen_movi_i64(tmp, 0);
    tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    gen_set_NZ64(result);

    tcg_gen_xor_i64(flag, result, t0);
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_andc_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);

    tcg_gen_mov_i64(dest, result);
}
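/*
 * The overflow computation uses the standard identity
 * V = (result ^ t0) & ~(t0 ^ t1): overflow can only occur when both
 * operands have the same sign and the result's sign differs; extrh
 * then places the i64 sign bit into bit 31 of cpu_VF.
 */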
static void gen_add32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_add64_CC(dest, t0, t1);
    } else {
        gen_add32_CC(dest, t0, t1);
    }
}
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 64 bit arithmetic */
    TCGv_i64 result, flag, tmp;

    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tcg_gen_sub_i64(result, t0, t1);

    gen_set_NZ64(result);

    tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    tcg_gen_xor_i64(flag, result, t0);
    tmp = tcg_temp_new_i64();
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_and_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);
    tcg_gen_mov_i64(dest, result);
}

static void gen_sub32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 32 bit arithmetic */
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp;

    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_sub64_CC(dest, t0, t1);
    } else {
        gen_sub32_CC(dest, t0, t1);
    }
}
/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}

/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result = tcg_temp_new_i64();
        TCGv_i64 cf_64 = tcg_temp_new_i64();
        TCGv_i64 vf_64 = tcg_temp_new_i64();
        TCGv_i64 tmp = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_constant_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);
    } else {
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();
        TCGv_i32 zero = tcg_constant_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, MemOp memop, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
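/*
 * Recording the instruction syndrome here lets a data abort taken on
 * this access report ISV-valid information (transfer register, size,
 * sign-extension, acquire/release) in ESR_ELx.
 */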
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, MemOp memop,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                             MemOp memop, bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && (memop & MO_SIGN)) {
        g_assert((memop & MO_SIZE) <= MO_32);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      (memop & MO_SIGN) != 0,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      MemOp memop, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, MemOp mop)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmplo = tcg_temp_new_i64();

    tcg_gen_ld_i64(tmplo, tcg_env, fp_reg_offset(s, srcidx, MO_64));

    if ((mop & MO_SIZE) < MO_128) {
        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        TCGv_i64 tmphi = tcg_temp_new_i64();
        TCGv_i128 t16 = tcg_temp_new_i128();

        tcg_gen_ld_i64(tmphi, tcg_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_concat_i64_i128(t16, tmplo, tmphi);

        tcg_gen_qemu_st_i128(t16, tcg_addr, get_mem_index(s), mop);
    }
}

/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, MemOp mop)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi = NULL;

    if ((mop & MO_SIZE) < MO_128) {
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        TCGv_i128 t16 = tcg_temp_new_i128();

        tcg_gen_qemu_ld_i128(t16, tcg_addr, get_mem_index(s), mop);

        tmphi = tcg_temp_new_i64();
        tcg_gen_extr_i128_i64(tmplo, tmphi, t16);
    }

    tcg_gen_st_i64(tmplo, tcg_env, fp_reg_offset(s, destidx, MO_64));

    if (tmphi) {
        tcg_gen_st_i64(tmphi, tcg_env, fp_reg_hi_offset(s, destidx));
    }
    clear_vec_high(s, tmphi != NULL, destidx);
}
/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch ((unsigned)memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_8 | MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16 | MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32 | MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_64:
    case MO_64 | MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_8 | MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16 | MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32:
    case MO_32 | MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
    write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
}
/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static bool fp_access_check_only(DisasContext *s)
{
    if (s->fp_excp_el) {
        assert(!s->fp_access_checked);
        s->fp_access_checked = true;

        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_fp_access_trap(1, 0xe, false, 0),
                              s->fp_excp_el);
        return false;
    }
    s->fp_access_checked = true;
    return true;
}

static bool fp_access_check(DisasContext *s)
{
    if (!fp_access_check_only(s)) {
        return false;
    }
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
        return false;
    }
    return true;
}

/*
 * Check that SVE access is enabled.  If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 * This function corresponds to CheckSVEEnabled().
 */
bool sve_access_check(DisasContext *s)
{
    if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
        assert(dc_isar_feature(aa64_sme, s));
        if (!sme_sm_enabled_check(s)) {
            goto fail_exit;
        }
    } else if (s->sve_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_sve_access_trap(), s->sve_excp_el);
        goto fail_exit;
    }
    s->sve_access_checked = true;
    return fp_access_check(s);

 fail_exit:
    /* Assert that we only raise one exception per instruction. */
    assert(!s->sve_access_checked);
    s->sve_access_checked = true;
    return false;
}
/*
 * Check that SME access is enabled, raise an exception if not.
 * Note that this function corresponds to CheckSMEAccess and is
 * only used directly for cpregs.
 */
static bool sme_access_check(DisasContext *s)
{
    if (s->sme_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_smetrap(SME_ET_AccessTrap, false),
                              s->sme_excp_el);
        return false;
    }
    return true;
}

/* This function corresponds to CheckSMEEnabled. */
bool sme_enabled_check(DisasContext *s)
{
    /*
     * Note that unlike sve_excp_el, we have not constrained sme_excp_el
     * to be zero when fp_excp_el has priority.  This is because we need
     * sme_excp_el by itself for cpregs access checks.
     */
    if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
        s->fp_access_checked = true;
        return sme_access_check(s);
    }
    return fp_access_check_only(s);
}

/* Common subroutine for CheckSMEAnd*Enabled. */
bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
{
    if (!sme_enabled_check(s)) {
        return false;
    }
    if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_NotStreaming, false));
        return false;
    }
    if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_InactiveZA, false));
        return false;
    }
    return true;
}
/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    tcg_gen_ext_i64(tcg_out, tcg_in, extsize | (is_signed ? MO_SIGN : 0));
    tcg_gen_shli_i64(tcg_out, tcg_out, shift);
}
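/*
 * E.g. option == 2 (UXTW) with shift == 2, as in "add x0, x1, w2,
 * uxtw #2", zero-extends the low 32 bits and then shifts left by 2.
 */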
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}
/*
 * This provides a simple table-based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}
/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */

static bool trans_B(DisasContext *s, arg_i *a)
{
    reset_btype(s);
    gen_goto_tb(s, 0, a->imm);
    return true;
}

static bool trans_BL(DisasContext *s, arg_i *a)
{
    gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
    reset_btype(s);
    gen_goto_tb(s, 0, a->imm);
    return true;
}

static bool trans_CBZ(DisasContext *s, arg_cbz *a)
{
    DisasLabel match;
    TCGv_i64 tcg_cmp;

    tcg_cmp = read_cpu_reg(s, a->rt, a->sf);
    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, a->imm);
    return true;
}

static bool trans_TBZ(DisasContext *s, arg_tbz *a)
{
    DisasLabel match;
    TCGv_i64 tcg_cmp;

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, a->rt), 1ULL << a->bitpos);

    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, a->imm);
    return true;
}

static bool trans_B_cond(DisasContext *s, arg_B_cond *a)
{
    /* BC.cond is only present with FEAT_HBC */
    if (a->c && !dc_isar_feature(aa64_hbc, s)) {
        return false;
    }
    reset_btype(s);
    if (a->cond < 0x0e) {
        /* genuinely conditional branches */
        DisasLabel match = gen_disas_label(s);
        arm_gen_test_cc(a->cond, match.label);
        gen_goto_tb(s, 0, 4);
        set_disas_label(s, match);
        gen_goto_tb(s, 1, a->imm);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, a->imm);
    }
    return true;
}
static void set_btype_for_br(DisasContext *s, int rn)
{
    if (dc_isar_feature(aa64_bti, s)) {
        /* BR to {x16,x17} or !guard -> 1, else 3.  */
        set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
    }
}

static void set_btype_for_blr(DisasContext *s)
{
    if (dc_isar_feature(aa64_bti, s)) {
        /* BLR sets BTYPE to 2, regardless of source guarded page.  */
        set_btype(s, 2);
    }
}

static bool trans_BR(DisasContext *s, arg_r *a)
{
    gen_a64_set_pc(s, cpu_reg(s, a->rn));
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLR(DisasContext *s, arg_r *a)
{
    TCGv_i64 dst = cpu_reg(s, a->rn);
    TCGv_i64 lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_RET(DisasContext *s, arg_r *a)
{
    gen_a64_set_pc(s, cpu_reg(s, a->rn));
    s->base.is_jmp = DISAS_JUMP;
    return true;
}
static TCGv_i64 auth_branch_target(DisasContext *s, TCGv_i64 dst,
                                   TCGv_i64 modifier, bool use_key_a)
{
    TCGv_i64 truedst;
    /*
     * Return the branch target for a BRAA/RETA/etc, which is either
     * just the destination dst, or that value with the pauth check
     * done and the code removed from the high bits.
     */
    if (!s->pauth_active) {
        return dst;
    }

    truedst = tcg_temp_new_i64();
    if (use_key_a) {
        gen_helper_autia_combined(truedst, tcg_env, dst, modifier);
    } else {
        gen_helper_autib_combined(truedst, tcg_env, dst, modifier);
    }
    return truedst;
}

static bool trans_BRAZ(DisasContext *s, arg_braz *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }

    dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
    gen_a64_set_pc(s, dst);
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLRAZ(DisasContext *s, arg_braz *a)
{
    TCGv_i64 dst, lr;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }

    dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
    lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_RETA(DisasContext *s, arg_reta *a)
{
    TCGv_i64 dst;

    dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
    gen_a64_set_pc(s, dst);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}
static bool trans_BRA(DisasContext *s, arg_bra *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
    gen_a64_set_pc(s, dst);
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLRA(DisasContext *s, arg_bra *a)
{
    TCGv_i64 dst, lr;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
    lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}
static bool trans_ERET(DisasContext *s, arg_ERET *a)
{
    TCGv_i64 dst;

    if (s->current_el == 0) {
        return false;
    }
    if (s->trap_eret) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(0), 2);
        return true;
    }
    dst = tcg_temp_new_i64();
    tcg_gen_ld_i64(dst, tcg_env,
                   offsetof(CPUARMState, elr_el[s->current_el]));

    translator_io_start(&s->base);

    gen_helper_exception_return(tcg_env, dst);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_ERETA(DisasContext *s, arg_reta *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }
    /* The FGT trap takes precedence over an auth trap. */
    if (s->trap_eret) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(a->m ? 3 : 2), 2);
        return true;
    }
    dst = tcg_temp_new_i64();
    tcg_gen_ld_i64(dst, tcg_env,
                   offsetof(CPUARMState, elr_el[s->current_el]));

    dst = auth_branch_target(s, dst, cpu_X[31], !a->m);

    translator_io_start(&s->base);

    gen_helper_exception_return(tcg_env, dst);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
    return true;
}
static bool trans_NOP(DisasContext *s, arg_NOP *a)
{
    return true;
}

static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
{
    /*
     * When running in MTTCG we don't generate jumps to the yield and
     * WFE helpers as it won't affect the scheduling of other vCPUs.
     * If we wanted to more completely model WFE/SEV so we don't busy
     * spin unnecessarily we would need to do something more involved.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        s->base.is_jmp = DISAS_YIELD;
    }
    return true;
}

static bool trans_WFI(DisasContext *s, arg_WFI *a)
{
    s->base.is_jmp = DISAS_WFI;
    return true;
}

static bool trans_WFE(DisasContext *s, arg_WFI *a)
{
    /*
     * When running in MTTCG we don't generate jumps to the yield and
     * WFE helpers as it won't affect the scheduling of other vCPUs.
     * If we wanted to more completely model WFE/SEV so we don't busy
     * spin unnecessarily we would need to do something more involved.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        s->base.is_jmp = DISAS_WFE;
    }
    return true;
}
static bool trans_XPACLRI(DisasContext *s, arg_XPACLRI *a)
{
    if (s->pauth_active) {
        gen_helper_xpaci(cpu_X[30], tcg_env, cpu_X[30]);
    }
    return true;
}

static bool trans_PACIA1716(DisasContext *s, arg_PACIA1716 *a)
{
    if (s->pauth_active) {
        gen_helper_pacia(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_PACIB1716(DisasContext *s, arg_PACIB1716 *a)
{
    if (s->pauth_active) {
        gen_helper_pacib(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_AUTIA1716(DisasContext *s, arg_AUTIA1716 *a)
{
    if (s->pauth_active) {
        gen_helper_autia(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_AUTIB1716(DisasContext *s, arg_AUTIB1716 *a)
{
    if (s->pauth_active) {
        gen_helper_autib(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}
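/*
 * The *1716 hint forms above follow the AArch64 convention of signing
 * or authenticating the pointer in x17 with the modifier in x16.
 */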
static bool trans_ESB(DisasContext *s, arg_ESB *a)
{
    /* Without RAS, we must implement this as NOP. */
    if (dc_isar_feature(aa64_ras, s)) {
        /*
         * QEMU does not have a source of physical SErrors,
         * so we are only concerned with virtual SErrors.
         * The pseudocode in the ARM for this case is
         *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
         *      AArch64.vESBOperation();
         * Most of the condition can be evaluated at translation time.
         * Test for EL2 present, and defer test for SEL2 to runtime.
         */
        if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
            gen_helper_vesb(tcg_env);
        }
    }
    return true;
}
static bool trans_PACIAZ(DisasContext *s, arg_PACIAZ *a)
{
    if (s->pauth_active) {
        gen_helper_pacia(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_PACIASP(DisasContext *s, arg_PACIASP *a)
{
    if (s->pauth_active) {
        gen_helper_pacia(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_PACIBZ(DisasContext *s, arg_PACIBZ *a)
{
    if (s->pauth_active) {
        gen_helper_pacib(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_PACIBSP(DisasContext *s, arg_PACIBSP *a)
{
    if (s->pauth_active) {
        gen_helper_pacib(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_AUTIAZ(DisasContext *s, arg_AUTIAZ *a)
{
    if (s->pauth_active) {
        gen_helper_autia(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_AUTIASP(DisasContext *s, arg_AUTIASP *a)
{
    if (s->pauth_active) {
        gen_helper_autia(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_AUTIBZ(DisasContext *s, arg_AUTIBZ *a)
{
    if (s->pauth_active) {
        gen_helper_autib(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_AUTIBSP(DisasContext *s, arg_AUTIBSP *a)
{
    if (s->pauth_active) {
        gen_helper_autib(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}
static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
    return true;
}

static bool trans_DSB_DMB(DisasContext *s, arg_DSB_DMB *a)
{
    /* We handle DSB and DMB the same way */
    TCGBar bar;

    switch (a->types) {
    case 1: /* MBReqTypes_Reads */
        bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
        break;
    case 2: /* MBReqTypes_Writes */
        bar = TCG_BAR_SC | TCG_MO_ST_ST;
        break;
    default: /* MBReqTypes_All */
        bar = TCG_BAR_SC | TCG_MO_ALL;
        break;
    }
    tcg_gen_mb(bar);
    return true;
}

static bool trans_ISB(DisasContext *s, arg_ISB *a)
{
    /*
     * We need to break the TB after this insn to execute
     * self-modifying code correctly and also to take
     * any pending interrupts immediately.
     */
    reset_btype(s);
    gen_goto_tb(s, 0, 4);
    return true;
}
static bool trans_SB(DisasContext *s, arg_SB *a)
{
    if (!dc_isar_feature(aa64_sb, s)) {
        return false;
    }
    /*
     * TODO: There is no speculation barrier opcode for TCG;
     * MB and end the TB instead.
     */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    gen_goto_tb(s, 0, 4);
    return true;
}

static bool trans_CFINV(DisasContext *s, arg_CFINV *a)
{
    if (!dc_isar_feature(aa64_condm_4, s)) {
        return false;
    }
    tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
    return true;
}

static bool trans_XAFLAG(DisasContext *s, arg_XAFLAG *a)
{
    TCGv_i32 z;

    if (!dc_isar_feature(aa64_condm_5, s)) {
        return false;
    }

    z = tcg_temp_new_i32();

    tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);

    /*
     * (!C & !Z) << 31
     * (!(C | Z)) << 31
     * ~((C | Z) << 31)
     * ~-(C | Z)
     * (C | Z) - 1
     */
    tcg_gen_or_i32(cpu_NF, cpu_CF, z);
    tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);

    /* !(Z & C) */
    tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
    tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);

    /* (!C & Z) << 31 -> -(Z & ~C) */
    tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);

    /* C | Z */
    tcg_gen_or_i32(cpu_CF, cpu_CF, z);

    return true;
}

static bool trans_AXFLAG(DisasContext *s, arg_AXFLAG *a)
{
    if (!dc_isar_feature(aa64_condm_5, s)) {
        return false;
    }

    tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
    tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */

    /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
    tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);

    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);

    return true;
}
static bool trans_MSR_i_UAO(DisasContext *s, arg_i *a)
{
    if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
        return false;
    }
    if (a->imm & 1) {
        set_pstate_bits(PSTATE_UAO);
    } else {
        clear_pstate_bits(PSTATE_UAO);
    }
    gen_rebuild_hflags(s);
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_PAN(DisasContext *s, arg_i *a)
{
    if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
        return false;
    }
    if (a->imm & 1) {
        set_pstate_bits(PSTATE_PAN);
    } else {
        clear_pstate_bits(PSTATE_PAN);
    }
    gen_rebuild_hflags(s);
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_SPSEL(DisasContext *s, arg_i *a)
{
    if (s->current_el == 0) {
        return false;
    }
    gen_helper_msr_i_spsel(tcg_env, tcg_constant_i32(a->imm & PSTATE_SP));
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_SBSS(DisasContext *s, arg_i *a)
{
    if (!dc_isar_feature(aa64_ssbs, s)) {
        return false;
    }
    if (a->imm & 1) {
        set_pstate_bits(PSTATE_SSBS);
    } else {
        clear_pstate_bits(PSTATE_SSBS);
    }
    /* Don't need to rebuild hflags since SSBS is a nop */
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_DIT(DisasContext *s, arg_i *a)
{
    if (!dc_isar_feature(aa64_dit, s)) {
        return false;
    }
    if (a->imm & 1) {
        set_pstate_bits(PSTATE_DIT);
    } else {
        clear_pstate_bits(PSTATE_DIT);
    }
    /* There's no need to rebuild hflags because DIT is a nop */
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_TCO(DisasContext *s, arg_i *a)
{
    if (dc_isar_feature(aa64_mte, s)) {
        /* Full MTE is enabled -- set the TCO bit as directed. */
        if (a->imm & 1) {
            set_pstate_bits(PSTATE_TCO);
        } else {
            clear_pstate_bits(PSTATE_TCO);
        }
        gen_rebuild_hflags(s);
        /* Many factors, including TCO, go into MTE_ACTIVE. */
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        return true;
    } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
        /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */
        return true;
    } else {
        /* Insn not present */
        return false;
    }
}

static bool trans_MSR_i_DAIFSET(DisasContext *s, arg_i *a)
{
    gen_helper_msr_i_daifset(tcg_env, tcg_constant_i32(a->imm));
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_DAIFCLEAR(DisasContext *s, arg_i *a)
{
    gen_helper_msr_i_daifclear(tcg_env, tcg_constant_i32(a->imm));
    /* Exit the cpu loop to re-evaluate pending IRQs. */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}

static bool trans_MSR_i_SVCR(DisasContext *s, arg_MSR_i_SVCR *a)
{
    if (!dc_isar_feature(aa64_sme, s) || a->mask == 0) {
        return false;
    }
    if (sme_access_check(s)) {
        int old = s->pstate_sm | (s->pstate_za << 1);
        int new = a->imm * 3;

        if ((old ^ new) & a->mask) {
            /* At least one bit changes. */
            gen_helper_set_svcr(tcg_env, tcg_constant_i32(new),
                                tcg_constant_i32(a->mask));
            s->base.is_jmp = DISAS_TOO_MANY;
        }
    }
    return true;
}
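/*
 * "new = a->imm * 3" maps the single immediate bit onto both the SM and
 * ZA bit positions (0b00 or 0b11); a->mask then selects which of the
 * two PSTATE fields this MSR actually writes.
 */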
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);
}

static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
}
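/*
 * Note the flag storage conventions at work here: cpu_NF and cpu_VF keep
 * their flag in bit 31, cpu_CF holds 0 or 1, and cpu_ZF is zero exactly
 * when Z is set -- hence the setcond inversion for Z and the shift by 3
 * that moves V from bit 28 to bit 31.
 */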
static void gen_sysreg_undef(DisasContext *s, bool isread,
                             uint8_t op0, uint8_t op1, uint8_t op2,
                             uint8_t crn, uint8_t crm, uint8_t rt)
{
    /*
     * Generate code to emit an UNDEF with correct syndrome
     * information for a failed system register access.
     * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
     * but if FEAT_IDST is implemented then read accesses to registers
     * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
     * syndrome.
     */
    uint32_t syndrome;

    if (isread && dc_isar_feature(aa64_ids, s) &&
        arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
    } else {
        syndrome = syn_uncategorized();
    }
    gen_exception_insn(s, 0, EXCP_UDEF, syndrome);
}
/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                      crn, crm, op0, op1, op2);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
    bool need_exit_tb = false;
    bool nv_trap_to_el2 = false;
    bool nv_redirect_reg = false;
    bool skip_fp_access_checks = false;
    bool nv2_mem_redirect = false;
    TCGv_ptr tcg_ri = NULL;
    TCGv_i64 tcg_rt;
    uint32_t syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);

    if (crn == 11 || crn == 15) {
        /*
         * Check for TIDCP trap, which must take precedence over
         * the UNDEF for "no such register" etc.
         */
        switch (s->current_el) {
        case 0:
            if (dc_isar_feature(aa64_tidcp1, s)) {
                gen_helper_tidcp_el0(tcg_env, tcg_constant_i32(syndrome));
            }
            break;
        case 1:
            gen_helper_tidcp_el1(tcg_env, tcg_constant_i32(syndrome));
            break;
        }
    }

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    if (s->nv2 && ri->nv2_redirect_offset) {
        /*
         * Some registers always redirect to memory; some only do so if
         * HCR_EL2.NV1 is 0, and some only if NV1 is 1 (these come in
         * pairs which share an offset; see the table in R_CSRPQ).
         */
        if (ri->nv2_redirect_offset & NV2_REDIR_NV1) {
            nv2_mem_redirect = s->nv1;
        } else if (ri->nv2_redirect_offset & NV2_REDIR_NO_NV1) {
            nv2_mem_redirect = !s->nv1;
        } else {
            nv2_mem_redirect = true;
        }
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        /*
         * FEAT_NV/NV2 handling does not do the usual FP access checks
         * for registers only accessible at EL2 (though it *does* do them
         * for registers accessible at EL1).
         */
        skip_fp_access_checks = true;
        if (s->nv2 && (ri->type & ARM_CP_NV2_REDIRECT)) {
            /*
             * This is one of the few EL2 registers which should redirect
             * to the equivalent EL1 register. We do that after running
             * the EL2 register's accessfn.
             */
            nv_redirect_reg = true;
            assert(!nv2_mem_redirect);
        } else if (nv2_mem_redirect) {
            /*
             * NV2 redirect-to-memory takes precedence over trap to EL2 or
             * UNDEF to EL1.
             */
        } else if (s->nv && arm_cpreg_traps_in_nv(ri)) {
            /*
             * This register / instruction exists and is an EL2 register, so
             * we must trap to EL2 if accessed in nested virtualization EL1
             * instead of UNDEFing. We'll do that after the usual access checks.
             * (This makes a difference only for a couple of registers like
             * VSTTBR_EL2 where the "UNDEF if NonSecure" should take priority
             * over the trap-to-EL2. Most trapped-by-FEAT_NV registers have
             * an accessfn which does nothing when called from EL1, because
             * the trap-to-EL3 controls which would apply to that register
             * at EL2 don't take priority over the FEAT_NV trap-to-EL2.)
             */
            nv_trap_to_el2 = true;
        } else {
            gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
            return;
        }
    }

    if (ri->accessfn || (ri->fgt && s->fgt_active)) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        gen_a64_update_pc(s, 0);
        tcg_ri = tcg_temp_new_ptr();
        gen_helper_access_check_cp_reg(tcg_ri, tcg_env,
                                       tcg_constant_i32(key),
                                       tcg_constant_i32(syndrome),
                                       tcg_constant_i32(isread));
    } else if (ri->type & ARM_CP_RAISES_EXC) {
        /*
         * The readfn or writefn might raise an exception;
         * synchronize the CPU state in case it does.
         */
        gen_a64_update_pc(s, 0);
    }

    if (!skip_fp_access_checks) {
        if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
            return;
        } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
            return;
        } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
            return;
        }
    }

    if (nv_trap_to_el2) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
        return;
    }

    if (nv_redirect_reg) {
        /*
         * FEAT_NV2 redirection of an EL2 register to an EL1 register.
         * Conveniently in all cases the encoding of the EL1 register is
         * identical to the EL2 register except that opc1 is 0.
         * Get the reginfo for the EL1 register to use for the actual access.
         * We don't use the EL1 register's access function, and
         * fine-grained-traps on EL1 also do not apply here.
         */
        key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                 crn, crm, op0, 0, op2);
        ri = get_arm_cp_reginfo(s->cp_regs, key);
        assert(ri);
        assert(cp_access_ok(s->current_el, ri, isread));
        /*
         * We might not have done an update_pc earlier, so check we don't
         * need it. We could support this in future if necessary.
         */
        assert(!(ri->type & ARM_CP_RAISES_EXC));
    }

    if (nv2_mem_redirect) {
        /*
         * This system register is being redirected into an EL2 memory access.
         * This means it is not an IO operation, doesn't change hflags,
         * and need not end the TB, because it has no side effects.
         *
         * The access is 64-bit single copy atomic, guaranteed aligned because
         * of the definition of VCNR_EL2. Its endianness depends on
         * SCTLR_EL2.EE, not on the data endianness of EL1.
         * It is done under either the EL2 translation regime or the EL2&0
         * translation regime, depending on HCR_EL2.E2H. It behaves as if
         * PSTATE.PAN is 0.
         */
        TCGv_i64 ptr = tcg_temp_new_i64();
        MemOp mop = MO_64 | MO_ALIGN | MO_ATOM_IFALIGN;
        ARMMMUIdx armmemidx = s->nv2_mem_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
        int memidx = arm_to_core_mmu_idx(armmemidx);
        uint32_t syn;

        mop |= (s->nv2_mem_be ? MO_BE : MO_LE);

        tcg_gen_ld_i64(ptr, tcg_env, offsetof(CPUARMState, cp15.vncr_el2));
        tcg_gen_addi_i64(ptr, ptr,
                         (ri->nv2_redirect_offset & ~NV2_REDIR_FLAG_MASK));
        tcg_rt = cpu_reg(s, rt);

        syn = syn_data_abort_vncr(0, !isread, 0);
        disas_set_insn_syndrome(s, syn);
        if (isread) {
            tcg_gen_qemu_ld_i64(tcg_rt, ptr, memidx, mop);
        } else {
            tcg_gen_qemu_st_i64(tcg_rt, ptr, memidx, mop);
        }
        return;
    }

    /* Handle special cases first */
    switch (ri->type & ARM_CP_SPECIAL_MASK) {
    case 0:
        break;
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
    {
        /*
         * Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         * For nested virt we should report EL2.
         */
        int el = s->nv ? 2 : s->current_el;
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, el << 2);
        return;
    }
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        if (s->mte_active[0]) {
            int desc = 0;

            desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
            desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
            desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);

            tcg_rt = tcg_temp_new_i64();
            gen_helper_mte_check_zva(tcg_rt, tcg_env,
                                     tcg_constant_i32(desc), cpu_reg(s, rt));
        } else {
            tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
        }
        gen_helper_dc_zva(tcg_env, tcg_rt);
        return;
    case ARM_CP_DC_GVA:
    {
        TCGv_i64 clean_addr, tag;

        /*
         * DC_GVA, like DC_ZVA, requires that we supply the original
         * pointer for an invalid page. Probe that address first.
         */
        tcg_rt = cpu_reg(s, rt);
        clean_addr = clean_data_tbi(s, tcg_rt);
        gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);

        if (s->ata[0]) {
            /* Extract the tag from the register to match STZGM. */
            tag = tcg_temp_new_i64();
            tcg_gen_shri_i64(tag, tcg_rt, 56);
            gen_helper_stzgm_tags(tcg_env, clean_addr, tag);
        }
        return;
    }
    case ARM_CP_DC_GZVA:
    {
        TCGv_i64 clean_addr, tag;

        /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
        tcg_rt = cpu_reg(s, rt);
        clean_addr = clean_data_tbi(s, tcg_rt);
        gen_helper_dc_zva(tcg_env, clean_addr);

        if (s->ata[0]) {
            /* Extract the tag from the register to match STZGM. */
            tag = tcg_temp_new_i64();
            tcg_gen_shri_i64(tag, tcg_rt, 56);
            gen_helper_stzgm_tags(tcg_env, clean_addr, tag);
        }
        return;
    }
    default:
        g_assert_not_reached();
    }

    if (ri->type & ARM_CP_IO) {
        /* I/O operations must end the TB here (whether read or write) */
        need_exit_tb = translator_io_start(&s->base);
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            if (!tcg_ri) {
                tcg_ri = gen_lookup_cp_reg(key);
            }
            gen_helper_get_cp_reg64(tcg_rt, tcg_env, tcg_ri);
        } else {
            tcg_gen_ld_i64(tcg_rt, tcg_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            if (!tcg_ri) {
                tcg_ri = gen_lookup_cp_reg(key);
            }
            gen_helper_set_cp_reg64(tcg_env, tcg_ri, tcg_rt);
        } else {
            tcg_gen_st_i64(tcg_rt, tcg_env, ri->fieldoffset);
        }
    }

    if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /*
         * A write to any coprocessor register that ends a TB
         * must rebuild the hflags for the next TB.
         */
        gen_rebuild_hflags(s);
        /*
         * We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        need_exit_tb = true;
    }
    if (need_exit_tb) {
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
}
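/*
 * For instance (illustrative example, not exhaustive): "mrs x0, cntvct_el0"
 * decodes to op0=3 op1=3 crn=14 crm=0 op2=2; handle_sys() looks that key up
 * in the cpregs hashtable and, for a register like this one with an accessfn
 * and readfn, emits the runtime access check followed by a helper call to
 * read the register. The exact code emitted depends on the ARMCPRegInfo.
 */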
static bool trans_SYS(DisasContext *s, arg_SYS *a)
{
    handle_sys(s, a->l, a->op0, a->op1, a->op2, a->crn, a->crm, a->rt);
    return true;
}
static bool trans_SVC(DisasContext *s, arg_i *a)
{
    /*
     * For SVC, HVC and SMC we advance the single-step state
     * machine before taking the exception. This is architecturally
     * mandated, to ensure that single-stepping a system call
     * instruction works properly.
     */
    uint32_t syndrome = syn_aa64_svc(a->imm);
    if (s->fgt_svc) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
        return true;
    }
    gen_ss_advance(s);
    gen_exception_insn(s, 4, EXCP_SWI, syndrome);
    return true;
}
static bool trans_HVC(DisasContext *s, arg_i *a)
{
    int target_el = s->current_el == 3 ? 3 : 2;

    if (s->current_el == 0) {
        unallocated_encoding(s);
        return true;
    }
    /*
     * The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration.
     */
    gen_a64_update_pc(s, 0);
    gen_helper_pre_hvc(tcg_env);
    /* Architecture requires ss advance before we do the actual work */
    gen_ss_advance(s);
    gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), target_el);
    return true;
}
static bool trans_SMC(DisasContext *s, arg_i *a)
{
    if (s->current_el == 0) {
        unallocated_encoding(s);
        return true;
    }
    gen_a64_update_pc(s, 0);
    gen_helper_pre_smc(tcg_env, tcg_constant_i32(syn_aa64_smc(a->imm)));
    /* Architecture requires ss advance before we do the actual work */
    gen_ss_advance(s);
    gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(a->imm), 3);
    return true;
}
static bool trans_BRK(DisasContext *s, arg_i *a)
{
    gen_exception_bkpt_insn(s, syn_aa64_bkpt(a->imm));
    return true;
}
static bool trans_HLT(DisasContext *s, arg_i *a)
{
    /*
     * HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
     */
    if (semihosting_enabled(s->current_el == 0) && a->imm == 0xf000) {
        gen_exception_internal_insn(s, EXCP_SEMIHOST);
    } else {
        unallocated_encoding(s);
    }
    return true;
}
/*
 * Load/Store exclusive instructions are implemented by remembering
 * the value/address loaded, and seeing if these are the same
 * when the store is performed. This is not actually the architecturally
 * mandated semantics, but it works for typical guest code sequences
 * and avoids having to monitor regular stores.
 *
 * The store exclusive uses the atomic cmpxchg primitives to avoid
 * races in multi-threaded linux-user and when MTTCG softmmu is
 * enabled.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn,
                               int size, bool is_pair)
{
    int idx = get_mem_index(s);
    TCGv_i64 dirty_addr, clean_addr;
    MemOp memop = check_atomic_align(s, rn, size + is_pair);

    s->is_ldex = true;
    dirty_addr = cpu_reg_sp(s, rn);
    clean_addr = gen_mte_check1(s, dirty_addr, false, rn != 31, memop);

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            TCGv_i128 t16 = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t16, clean_addr, idx, memop);

            if (s->be_data == MO_LE) {
                tcg_gen_extr_i128_i64(cpu_exclusive_val,
                                      cpu_exclusive_high, t16);
            } else {
                tcg_gen_extr_i128_i64(cpu_exclusive_high,
                                      cpu_exclusive_val, t16);
            }
            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_exclusive_addr, clean_addr);
}
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                int rn, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp, clean_addr;
    MemOp memop;

    /*
     * FIXME: We are out of spec here.  We have recorded only the address
     * from load_exclusive, not the entire range, and we assume that the
     * size of the access on both sides match.  The architecture allows the
     * store to be smaller than the load, so long as the stored bytes are
     * within the range recorded by the load.
     */

    /* See AArch64.ExclusiveMonitorsPass() and AArch64.IsExclusiveVA(). */
    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
    tcg_gen_brcond_i64(TCG_COND_NE, clean_addr, cpu_exclusive_addr, fail_label);

    /*
     * The write, and any associated faults, only happen if the virtual
     * and physical addresses pass the exclusive monitor check.  These
     * faults are exceedingly unlikely, because normally the guest uses
     * the exact same address register for the load_exclusive, and we
     * would have recognized these faults there.
     *
     * It is possible to trigger an alignment fault pre-LSE2, e.g. with an
     * unaligned 4-byte write within the range of an aligned 8-byte load.
     * With LSE2, the store would need to cross a 16-byte boundary when the
     * load did not, which would mean the store is outside the range
     * recorded for the monitor, which would have failed a corrected monitor
     * check above.  For now, we assume no size change and retain the
     * MO_ALIGN to let tcg know what we checked in the load_exclusive.
     *
     * It is possible to trigger an MTE fault, by performing the load with
     * a virtual address with a valid tag and performing the store with the
     * same virtual address and a different invalid tag.
     */
    memop = size + is_pair;
    if (memop == MO_128 || !dc_isar_feature(aa64_lse2, s)) {
        memop |= MO_ALIGN;
    }
    memop = finalize_memop(s, memop);
    gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s), memop);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else {
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv_i128 c16 = tcg_temp_new_i128();
            TCGv_i64 a, b;

            if (s->be_data == MO_LE) {
                tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt), cpu_reg(s, rt2));
                tcg_gen_concat_i64_i128(c16, cpu_exclusive_val,
                                        cpu_exclusive_high);
            } else {
                tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt2), cpu_reg(s, rt));
                tcg_gen_concat_i64_i128(c16, cpu_exclusive_high,
                                        cpu_exclusive_val);
            }

            tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16,
                                        get_mem_index(s), memop);

            a = tcg_temp_new_i64();
            b = tcg_temp_new_i64();
            if (s->be_data == MO_LE) {
                tcg_gen_extr_i128_i64(a, b, t16);
            } else {
                tcg_gen_extr_i128_i64(b, a, t16);
            }

            tcg_gen_xor_i64(a, a, cpu_exclusive_val);
            tcg_gen_xor_i64(b, b, cpu_exclusive_high);
            tcg_gen_or_i64(tmp, a, b);

            tcg_gen_setcondi_i64(TCG_COND_NE, tmp, tmp, 0);
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s), memop);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
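/*
 * A sketch of the guest pattern this models (illustrative, not from the
 * architecture manual): a typical atomic-increment loop is
 *
 *   retry:
 *     ldxr  x0, [x2]
 *     add   x0, x0, #1
 *     stxr  w1, x0, [x2]
 *     cbnz  w1, retry
 *
 * The LDXR records address and value in cpu_exclusive_addr/val; the STXR
 * is then translated to the cmpxchg above, which writes 0 to the status
 * register on success and 1 on failure.
 */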
static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 clean_addr;
    MemOp memop;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    memop = check_atomic_align(s, rn, size);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt,
                               memidx, memop);
}
static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 clean_addr;
    int memidx = get_mem_index(s);
    MemOp memop;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* This is a single atomic access, despite the "pair". */
    memop = check_atomic_align(s, rn, size + 1);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);

    if (size == 2) {
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx, memop);

        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
    } else {
        TCGv_i128 cmp = tcg_temp_new_i128();
        TCGv_i128 val = tcg_temp_new_i128();

        if (s->be_data == MO_LE) {
            tcg_gen_concat_i64_i128(val, t1, t2);
            tcg_gen_concat_i64_i128(cmp, s1, s2);
        } else {
            tcg_gen_concat_i64_i128(val, t2, t1);
            tcg_gen_concat_i64_i128(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx, memop);

        if (s->be_data == MO_LE) {
            tcg_gen_extr_i128_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr_i128_i64(s2, s1, cmp);
        }
    }
}
/*
 * Compute the ISS.SF bit for syndrome information if an exception
 * is taken on a load or store. This indicates whether the instruction
 * is accessing a 32-bit or 64-bit register. This logic is derived
 * from the ARMv8 specs for LDR (Shared decode for all encodings).
 */
static bool ldst_iss_sf(int size, bool sign, bool ext)
{
    if (sign) {
        /*
         * Signed loads are 64 bit results if we are not going to
         * do a zero-extend from 32 to 64 after the load.
         * (For a store, sign and ext are always false.)
         */
        return !ext;
    } else {
        /* Unsigned loads/stores work at the specified size */
        return size == MO_64;
    }
}
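/*
 * For example: LDRSW Xt (size=MO_32, sign, !ext) produces a 64-bit
 * result, so SF is 1; LDR Wt (size=MO_32, unsigned) gives SF = 0;
 * plain LDR Xt (size=MO_64) gives SF = 1.
 */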
static bool trans_STXR(DisasContext *s, arg_stxr *a)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    if (a->lasr) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }
    gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, false);
    return true;
}

static bool trans_LDXR(DisasContext *s, arg_stxr *a)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, false);
    if (a->lasr) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
    return true;
}
static bool trans_STLR(DisasContext *s, arg_stlr *a)
{
    TCGv_i64 clean_addr;
    MemOp memop;
    bool iss_sf = ldst_iss_sf(a->sz, false, false);

    /*
     * StoreLORelease is the same as Store-Release for QEMU, but
     * needs the feature-test.
     */
    if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
        return false;
    }
    /* Generate ISS for non-exclusive accesses including LASR. */
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    memop = check_ordered_align(s, a->rn, 0, true, a->sz);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
                                true, a->rn != 31, memop);
    do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, memop, true, a->rt,
              iss_sf, a->lasr);
    return true;
}

static bool trans_LDAR(DisasContext *s, arg_stlr *a)
{
    TCGv_i64 clean_addr;
    MemOp memop;
    bool iss_sf = ldst_iss_sf(a->sz, false, false);

    /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
    if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
        return false;
    }
    /* Generate ISS for non-exclusive accesses including LASR. */
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    memop = check_ordered_align(s, a->rn, 0, false, a->sz);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
                                false, a->rn != 31, memop);
    do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, memop, false, true,
              a->rt, iss_sf, a->lasr);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    return true;
}
static bool trans_STXP(DisasContext *s, arg_stxr *a)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    if (a->lasr) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }
    gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, true);
    return true;
}

static bool trans_LDXP(DisasContext *s, arg_stxr *a)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, true);
    if (a->lasr) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
    return true;
}
static bool trans_CASP(DisasContext *s, arg_CASP *a)
{
    if (!dc_isar_feature(aa64_atomics, s)) {
        return false;
    }
    if (((a->rt | a->rs) & 1) != 0) {
        return false;
    }

    gen_compare_and_swap_pair(s, a->rs, a->rt, a->rn, a->sz);
    return true;
}

static bool trans_CAS(DisasContext *s, arg_CAS *a)
{
    if (!dc_isar_feature(aa64_atomics, s)) {
        return false;
    }
    gen_compare_and_swap(s, a->rs, a->rt, a->rn, a->sz);
    return true;
}
static bool trans_LD_lit(DisasContext *s, arg_ldlit *a)
{
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, false);
    TCGv_i64 tcg_rt = cpu_reg(s, a->rt);
    TCGv_i64 clean_addr = tcg_temp_new_i64();
    MemOp memop = finalize_memop(s, a->sz + a->sign * MO_SIGN);

    gen_pc_plus_diff(s, clean_addr, a->imm);
    do_gpr_ld(s, tcg_rt, clean_addr, memop,
              false, true, a->rt, iss_sf, false);
    return true;
}

static bool trans_LD_lit_v(DisasContext *s, arg_ldlit *a)
{
    /* Load register (literal), vector version */
    TCGv_i64 clean_addr;
    MemOp memop;

    if (!fp_access_check(s)) {
        return true;
    }
    memop = finalize_memop_asimd(s, a->sz);
    clean_addr = tcg_temp_new_i64();
    gen_pc_plus_diff(s, clean_addr, a->imm);
    do_fp_ld(s, a->rt, clean_addr, memop);
    return true;
}
static void op_addr_ldstpair_pre(DisasContext *s, arg_ldstpair *a,
                                 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
                                 uint64_t offset, bool is_store, MemOp mop)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    if (!a->p) {
        tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
    }

    *clean_addr = gen_mte_checkN(s, *dirty_addr, is_store,
                                 (a->w || a->rn != 31), 2 << a->sz, mop);
}

static void op_addr_ldstpair_post(DisasContext *s, arg_ldstpair *a,
                                  TCGv_i64 dirty_addr, uint64_t offset)
{
    if (a->w) {
        if (a->p) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
    }
}
static bool trans_STP(DisasContext *s, arg_ldstpair *a)
{
    uint64_t offset = a->imm << a->sz;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
    MemOp mop = finalize_memop(s, a->sz);

    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, true, mop);
    tcg_rt = cpu_reg(s, a->rt);
    tcg_rt2 = cpu_reg(s, a->rt2);

    /*
     * We built mop above for the single logical access -- rebuild it
     * now for the paired operation.
     *
     * With LSE2, non-sign-extending pairs are treated atomically if
     * aligned, and if unaligned one of the pair will be completely
     * within a 16-byte block and that element will be atomic.
     * Otherwise each element is separately atomic.
     * In all cases, issue one operation with the correct atomicity.
     */
    mop = a->sz + 1;
    if (s->align_mem) {
        mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
    }
    mop = finalize_memop_pair(s, mop);
    if (a->sz == 2) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(tmp, tcg_rt, tcg_rt2);
        } else {
            tcg_gen_concat32_i64(tmp, tcg_rt2, tcg_rt);
        }
        tcg_gen_qemu_st_i64(tmp, clean_addr, get_mem_index(s), mop);
    } else {
        TCGv_i128 tmp = tcg_temp_new_i128();

        if (s->be_data == MO_LE) {
            tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2);
        } else {
            tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt);
        }
        tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);
    }
    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}
static bool trans_LDP(DisasContext *s, arg_ldstpair *a)
{
    uint64_t offset = a->imm << a->sz;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
    MemOp mop = finalize_memop(s, a->sz);

    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop);
    tcg_rt = cpu_reg(s, a->rt);
    tcg_rt2 = cpu_reg(s, a->rt2);

    /*
     * We built mop above for the single logical access -- rebuild it
     * now for the paired operation.
     *
     * With LSE2, non-sign-extending pairs are treated atomically if
     * aligned, and if unaligned one of the pair will be completely
     * within a 16-byte block and that element will be atomic.
     * Otherwise each element is separately atomic.
     * In all cases, issue one operation with the correct atomicity.
     *
     * This treats sign-extending loads like zero-extending loads,
     * since that reuses the most code below.
     */
    mop = a->sz + 1;
    if (s->align_mem) {
        mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
    }
    mop = finalize_memop_pair(s, mop);
    if (a->sz == 2) {
        int o2 = s->be_data == MO_LE ? 32 : 0;
        int o1 = o2 ^ 32;

        tcg_gen_qemu_ld_i64(tcg_rt, clean_addr, get_mem_index(s), mop);
        if (a->sign) {
            tcg_gen_sextract_i64(tcg_rt2, tcg_rt, o2, 32);
            tcg_gen_sextract_i64(tcg_rt, tcg_rt, o1, 32);
        } else {
            tcg_gen_extract_i64(tcg_rt2, tcg_rt, o2, 32);
            tcg_gen_extract_i64(tcg_rt, tcg_rt, o1, 32);
        }
    } else {
        TCGv_i128 tmp = tcg_temp_new_i128();

        tcg_gen_qemu_ld_i128(tmp, clean_addr, get_mem_index(s), mop);
        if (s->be_data == MO_LE) {
            tcg_gen_extr_i128_i64(tcg_rt, tcg_rt2, tmp);
        } else {
            tcg_gen_extr_i128_i64(tcg_rt2, tcg_rt, tmp);
        }
    }
    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}
static bool trans_STP_v(DisasContext *s, arg_ldstpair *a)
{
    uint64_t offset = a->imm << a->sz;
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop;

    if (!fp_access_check(s)) {
        return true;
    }

    /* LSE2 does not merge FP pairs; leave these as separate operations. */
    mop = finalize_memop_asimd(s, a->sz);
    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, true, mop);
    do_fp_st(s, a->rt, clean_addr, mop);
    tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz);
    do_fp_st(s, a->rt2, clean_addr, mop);
    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}

static bool trans_LDP_v(DisasContext *s, arg_ldstpair *a)
{
    uint64_t offset = a->imm << a->sz;
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop;

    if (!fp_access_check(s)) {
        return true;
    }

    /* LSE2 does not merge FP pairs; leave these as separate operations. */
    mop = finalize_memop_asimd(s, a->sz);
    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop);
    do_fp_ld(s, a->rt, clean_addr, mop);
    tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz);
    do_fp_ld(s, a->rt2, clean_addr, mop);
    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}
static bool trans_STGP(DisasContext *s, arg_ldstpair *a)
{
    TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
    uint64_t offset = a->imm << LOG2_TAG_GRANULE;
    TCGv_i128 tmp;
    MemOp mop;

    /* STGP only comes in one size. */
    tcg_debug_assert(a->sz == MO_64);

    if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    if (!a->p) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    }

    clean_addr = clean_data_tbi(s, dirty_addr);
    tcg_rt = cpu_reg(s, a->rt);
    tcg_rt2 = cpu_reg(s, a->rt2);

    /*
     * STGP is defined as two 8-byte memory operations, aligned to TAG_GRANULE,
     * and one tag operation.  We implement it as one single aligned 16-byte
     * memory operation for convenience.  Note that the alignment ensures
     * MO_ATOM_IFALIGN_PAIR produces 8-byte atomicity for the memory store.
     */
    mop = finalize_memop_atom(s, MO_128 | MO_ALIGN, MO_ATOM_IFALIGN_PAIR);

    tmp = tcg_temp_new_i128();
    if (s->be_data == MO_LE) {
        tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2);
    } else {
        tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt);
    }
    tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);

    /* Perform the tag store, if tag access enabled. */
    if (s->ata[0]) {
        if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            gen_helper_stg_parallel(tcg_env, dirty_addr, dirty_addr);
        } else {
            gen_helper_stg(tcg_env, dirty_addr, dirty_addr);
        }
    }

    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}
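/*
 * Note on the offset scaling above (illustrative): LOG2_TAG_GRANULE is 4,
 * so the immediate counts 16-byte tag granules; e.g. "stgp x0, x1, [sp, #32]"
 * encodes a->imm == 2.
 */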
static void op_addr_ldst_imm_pre(DisasContext *s, arg_ldst_imm *a,
                                 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
                                 uint64_t offset, bool is_store, MemOp mop)
{
    int memidx;

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    if (!a->p) {
        tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
    }
    memidx = get_a64_user_mem_index(s, a->unpriv);
    *clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store,
                                        a->w || a->rn != 31,
                                        mop, a->unpriv, memidx);
}

static void op_addr_ldst_imm_post(DisasContext *s, arg_ldst_imm *a,
                                  TCGv_i64 dirty_addr, uint64_t offset)
{
    if (a->w) {
        if (a->p) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
    }
}
static bool trans_STR_i(DisasContext *s, arg_ldst_imm *a)
{
    bool iss_sf, iss_valid = !a->w;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    int memidx = get_a64_user_mem_index(s, a->unpriv);
    MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);

    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);

    tcg_rt = cpu_reg(s, a->rt);
    iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);

    do_gpr_st_memidx(s, tcg_rt, clean_addr, mop, memidx,
                     iss_valid, a->rt, iss_sf, false);
    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
    return true;
}

static bool trans_LDR_i(DisasContext *s, arg_ldst_imm *a)
{
    bool iss_sf, iss_valid = !a->w;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    int memidx = get_a64_user_mem_index(s, a->unpriv);
    MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);

    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);

    tcg_rt = cpu_reg(s, a->rt);
    iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);

    do_gpr_ld_memidx(s, tcg_rt, clean_addr, mop,
                     a->ext, memidx, iss_valid, a->rt, iss_sf, false);
    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
    return true;
}
static bool trans_STR_v_i(DisasContext *s, arg_ldst_imm *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop;

    if (!fp_access_check(s)) {
        return true;
    }
    mop = finalize_memop_asimd(s, a->sz);
    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);
    do_fp_st(s, a->rt, clean_addr, mop);
    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
    return true;
}

static bool trans_LDR_v_i(DisasContext *s, arg_ldst_imm *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop;

    if (!fp_access_check(s)) {
        return true;
    }
    mop = finalize_memop_asimd(s, a->sz);
    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);
    do_fp_ld(s, a->rt, clean_addr, mop);
    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
    return true;
}
static void op_addr_ldst_pre(DisasContext *s, arg_ldst *a,
                             TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
                             bool is_store, MemOp memop)
{
    TCGv_i64 tcg_rm;

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);

    tcg_rm = read_cpu_reg(s, a->rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, a->opt, a->s ? a->sz : 0);

    tcg_gen_add_i64(*dirty_addr, *dirty_addr, tcg_rm);
    *clean_addr = gen_mte_check1(s, *dirty_addr, is_store, true, memop);
}
static bool trans_LDR(DisasContext *s, arg_ldst *a)
{
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
    MemOp memop;

    if (extract32(a->opt, 1, 1) == 0) {
        return false;
    }

    memop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
    op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop);
    tcg_rt = cpu_reg(s, a->rt);
    do_gpr_ld(s, tcg_rt, clean_addr, memop,
              a->ext, true, a->rt, iss_sf, false);
    return true;
}

static bool trans_STR(DisasContext *s, arg_ldst *a)
{
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
    MemOp memop;

    if (extract32(a->opt, 1, 1) == 0) {
        return false;
    }

    memop = finalize_memop(s, a->sz);
    op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop);
    tcg_rt = cpu_reg(s, a->rt);
    do_gpr_st(s, tcg_rt, clean_addr, memop, true, a->rt, iss_sf, false);
    return true;
}
static bool trans_LDR_v(DisasContext *s, arg_ldst *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp memop;

    if (extract32(a->opt, 1, 1) == 0) {
        return false;
    }

    if (!fp_access_check(s)) {
        return true;
    }

    memop = finalize_memop_asimd(s, a->sz);
    op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop);
    do_fp_ld(s, a->rt, clean_addr, memop);
    return true;
}

static bool trans_STR_v(DisasContext *s, arg_ldst *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp memop;

    if (extract32(a->opt, 1, 1) == 0) {
        return false;
    }

    if (!fp_access_check(s)) {
        return true;
    }

    memop = finalize_memop_asimd(s, a->sz);
    op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop);
    do_fp_st(s, a->rt, clean_addr, memop);
    return true;
}
static bool do_atomic_ld(DisasContext *s, arg_atomic *a, AtomicThreeOpFn *fn,
                         int sign, bool invert)
{
    MemOp mop = a->sz | sign;
    TCGv_i64 clean_addr, tcg_rs, tcg_rt;

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    mop = check_atomic_align(s, a->rn, mop);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false,
                                a->rn != 31, mop);
    tcg_rs = read_cpu_reg(s, a->rs, true);
    tcg_rt = cpu_reg(s, a->rt);
    if (invert) {
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }
    /*
     * The tcg atomic primitives are all full barriers.  Therefore we
     * can ignore the Acquire and Release bits of this instruction.
     */
    fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);

    if (mop & MO_SIGN) {
        switch (a->sz) {
        case MO_8:
            tcg_gen_ext8u_i64(tcg_rt, tcg_rt);
            break;
        case MO_16:
            tcg_gen_ext16u_i64(tcg_rt, tcg_rt);
            break;
        case MO_32:
            tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
            break;
        case MO_64:
            break;
        default:
            g_assert_not_reached();
        }
    }
    return true;
}
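/*
 * E.g. "ldadd x1, x0, [x2]" atomically adds X1 into [x2] and returns the
 * old memory value in X0; LDCLR becomes fetch-and-AND with the operand
 * inverted, which is what the "invert" flag above implements.
 * (Illustrative note.)
 */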
TRANS_FEAT(LDADD, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_add_i64, 0, false)
TRANS_FEAT(LDCLR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_and_i64, 0, true)
TRANS_FEAT(LDEOR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_xor_i64, 0, false)
TRANS_FEAT(LDSET, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_or_i64, 0, false)
TRANS_FEAT(LDSMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smax_i64, MO_SIGN, false)
TRANS_FEAT(LDSMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smin_i64, MO_SIGN, false)
TRANS_FEAT(LDUMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umax_i64, 0, false)
TRANS_FEAT(LDUMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umin_i64, 0, false)
TRANS_FEAT(SWP, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_xchg_i64, 0, false)
static bool trans_LDAPR(DisasContext *s, arg_LDAPR *a)
{
    bool iss_sf = ldst_iss_sf(a->sz, false, false);
    TCGv_i64 clean_addr;
    MemOp mop;

    if (!dc_isar_feature(aa64_atomics, s) ||
        !dc_isar_feature(aa64_rcpc_8_3, s)) {
        return false;
    }
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    mop = check_atomic_align(s, a->rn, a->sz);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false,
                                a->rn != 31, mop);
    /*
     * LDAPR* are a special case because they are a simple load, not a
     * fetch-and-do-something op.
     * The architectural consistency requirements here are weaker than
     * full load-acquire (we only need "load-acquire processor consistent"),
     * but we choose to implement them as full LDAQ.
     */
    do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, false,
              true, a->rt, iss_sf, true);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    return true;
}
static bool trans_LDRA(DisasContext *s, arg_LDRA *a)
{
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    MemOp memop;

    /* Load with pointer authentication */
    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);

    if (s->pauth_active) {
        if (!a->m) {
            gen_helper_autda_combined(dirty_addr, tcg_env, dirty_addr,
                                      tcg_constant_i64(0));
        } else {
            gen_helper_autdb_combined(dirty_addr, tcg_env, dirty_addr,
                                      tcg_constant_i64(0));
        }
    }

    tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);

    memop = finalize_memop(s, MO_64);

    /* Note that "clean" and "dirty" here refer to TBI not PAC. */
    clean_addr = gen_mte_check1(s, dirty_addr, false,
                                a->w || a->rn != 31, memop);

    tcg_rt = cpu_reg(s, a->rt);
    do_gpr_ld(s, tcg_rt, clean_addr, memop,
              /* extend */ false, /* iss_valid */ !a->w,
              /* iss_srt */ a->rt, /* iss_sf */ true, /* iss_ar */ false);

    if (a->w) {
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
    }
    return true;
}
static bool trans_LDAPR_i(DisasContext *s, arg_ldapr_stlr_i *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop = a->sz | (a->sign ? MO_SIGN : 0);
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    mop = check_ordered_align(s, a->rn, a->imm, false, mop);
    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
    clean_addr = clean_data_tbi(s, dirty_addr);

    /*
     * Load-AcquirePC semantics; we implement as the slightly more
     * restrictive Load-Acquire.
     */
    do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, a->ext, true,
              a->rt, iss_sf, true);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    return true;
}
static bool trans_STLR_i(DisasContext *s, arg_ldapr_stlr_i *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop = a->sz;
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        return false;
    }

    /* TODO: ARMv8.4-LSE SCTLR.nAA */

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    mop = check_ordered_align(s, a->rn, a->imm, true, mop);
    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
    clean_addr = clean_data_tbi(s, dirty_addr);

    /* Store-Release semantics */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, mop, true, a->rt, iss_sf, true);
    return true;
}
static bool trans_LD_mult(DisasContext *s, arg_ldst_mult *a)
{
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian, align, mop;

    int total;    /* total bytes */
    int elements; /* elements per vector */
    int r;
    int size = a->sz;

    if (!a->p && a->rm != 0) {
        /* For non-postindexed accesses the Rm field must be 0 */
        return false;
    }
    if (size == 3 && !a->q && a->selem != 1) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian. */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    total = a->rpt * a->selem * (a->q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, a->rn);

    /*
     * Issue the MTE check vs the logical repeat count, before we
     * promote consecutive little-endian elements below.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31, total,
                                finalize_memop_asimd(s, size));

    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    align = MO_ALIGN;
    if (a->selem == 1 && endian == MO_LE) {
        align = pow2_align(size);
        size = 3;
    }
    if (!s->align_mem) {
        align = 0;
    }
    mop = endian | size | align;

    elements = (a->q ? 16 : 8) >> size;
    tcg_ebytes = tcg_constant_i64(1 << size);
    for (r = 0; r < a->rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < a->selem; xs++) {
                int tt = (a->rt + r + xs) % 32;
                do_vec_ld(s, tt, e, clean_addr, mop);
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }

    /*
     * For non-quad operations, setting a slice of the low 64 bits of
     * the register clears the high 64 bits (in the ARM ARM pseudocode
     * this is implicit in the fact that 'rval' is a 64 bit wide
     * variable).  For quad operations, we might still need to zero
     * the high bits of SVE.
     */
    for (r = 0; r < a->rpt * a->selem; r++) {
        int tt = (a->rt + r) % 32;
        clear_vec_high(s, a->q, tt);
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}
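/*
 * Worked example (illustrative): "ld4 {v0.4s-v3.4s}, [x0]" has rpt=1,
 * selem=4, q=1, size=2, so total = 64 bytes and elements = 4; the loops
 * above then load one 4-byte element into each of v0..v3 in turn,
 * de-interleaving the structure from memory.
 */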
static bool trans_ST_mult(DisasContext *s, arg_ldst_mult *a)
{
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian, align, mop;

    int total;    /* total bytes */
    int elements; /* elements per vector */
    int r;
    int size = a->sz;

    if (!a->p && a->rm != 0) {
        /* For non-postindexed accesses the Rm field must be 0 */
        return false;
    }
    if (size == 3 && !a->q && a->selem != 1) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian. */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    total = a->rpt * a->selem * (a->q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, a->rn);

    /*
     * Issue the MTE check vs the logical repeat count, before we
     * promote consecutive little-endian elements below.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31, total,
                                finalize_memop_asimd(s, size));

    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    align = MO_ALIGN;
    if (a->selem == 1 && endian == MO_LE) {
        align = pow2_align(size);
        size = 3;
    }
    if (!s->align_mem) {
        align = 0;
    }
    mop = endian | size | align;

    elements = (a->q ? 16 : 8) >> size;
    tcg_ebytes = tcg_constant_i64(1 << size);
    for (r = 0; r < a->rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < a->selem; xs++) {
                int tt = (a->rt + r + xs) % 32;
                do_vec_st(s, tt, e, clean_addr, mop);
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}
static bool trans_ST_single(DisasContext *s, arg_ldst_single *a)
{
    int xs, total, rt;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (!a->p && a->rm != 0) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = a->selem << a->scale;
    tcg_rn = cpu_reg_sp(s, a->rn);

    mop = finalize_memop_asimd(s, a->scale);
    clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31,
                                total, mop);

    tcg_ebytes = tcg_constant_i64(1 << a->scale);
    for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
        do_vec_st(s, rt, a->index, clean_addr, mop);
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}
static bool trans_LD_single(DisasContext *s, arg_ldst_single *a)
{
    int xs, total, rt;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (!a->p && a->rm != 0) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = a->selem << a->scale;
    tcg_rn = cpu_reg_sp(s, a->rn);

    mop = finalize_memop_asimd(s, a->scale);
    clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31,
                                total, mop);

    tcg_ebytes = tcg_constant_i64(1 << a->scale);
    for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
        do_vec_ld(s, rt, a->index, clean_addr, mop);
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}
static bool trans_LD_single_repl(DisasContext *s, arg_LD_single_repl *a)
{
    int xs, total, rt;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (!a->p && a->rm != 0) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = a->selem << a->scale;
    tcg_rn = cpu_reg_sp(s, a->rn);

    mop = finalize_memop_asimd(s, a->scale);
    clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31,
                                total, mop);

    tcg_ebytes = tcg_constant_i64(1 << a->scale);
    for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
        /* Load and replicate to all elements */
        TCGv_i64 tcg_tmp = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
        tcg_gen_gvec_dup_i64(a->scale, vec_full_reg_offset(s, rt),
                             (a->q + 1) * 8, vec_full_reg_size(s), tcg_tmp);
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}
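/*
 * For instance, "ld1r {v0.4s}, [x0]" (scale=2, selem=1) loads a single
 * 32-bit value and the gvec dup above replicates it into all four lanes
 * of v0. (Illustrative example.)
 */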
static bool trans_STZGM(DisasContext *s, arg_ldst_tag *a)
{
    TCGv_i64 addr, clean_addr, tcg_rt;
    int size = 4 << s->dcz_blocksize;

    if (!dc_isar_feature(aa64_mte, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    tcg_gen_addi_i64(addr, addr, a->imm);
    tcg_rt = cpu_reg(s, a->rt);

    if (s->ata[0]) {
        gen_helper_stzgm_tags(tcg_env, addr, tcg_rt);
    }
    /*
     * The non-tags portion of STZGM is mostly like DC_ZVA,
     * except the alignment happens before the access.
     */
    clean_addr = clean_data_tbi(s, addr);
    tcg_gen_andi_i64(clean_addr, clean_addr, -size);
    gen_helper_dc_zva(tcg_env, clean_addr);
    return true;
}
static bool trans_STGM(DisasContext *s, arg_ldst_tag *a)
{
    TCGv_i64 addr, clean_addr, tcg_rt;

    if (!dc_isar_feature(aa64_mte, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    tcg_gen_addi_i64(addr, addr, a->imm);
    tcg_rt = cpu_reg(s, a->rt);

    if (s->ata[0]) {
        gen_helper_stgm(tcg_env, addr, tcg_rt);
    } else {
        MMUAccessType acc = MMU_DATA_STORE;
        int size = 4 << s->gm_blocksize;

        clean_addr = clean_data_tbi(s, addr);
        tcg_gen_andi_i64(clean_addr, clean_addr, -size);
        gen_probe_access(s, clean_addr, acc, size);
    }
    return true;
}
static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a)
{
    TCGv_i64 addr, clean_addr, tcg_rt;

    if (!dc_isar_feature(aa64_mte, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    tcg_gen_addi_i64(addr, addr, a->imm);
    tcg_rt = cpu_reg(s, a->rt);

    if (s->ata[0]) {
        gen_helper_ldgm(tcg_rt, tcg_env, addr);
    } else {
        MMUAccessType acc = MMU_DATA_LOAD;
        int size = 4 << s->gm_blocksize;

        clean_addr = clean_data_tbi(s, addr);
        tcg_gen_andi_i64(clean_addr, clean_addr, -size);
        gen_probe_access(s, clean_addr, acc, size);
        /* The result tags are zeros. */
        tcg_gen_movi_i64(tcg_rt, 0);
    }
    return true;
}
static bool trans_LDG(DisasContext *s, arg_ldst_tag *a)
{
    TCGv_i64 addr, clean_addr, tcg_rt;

    if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    if (!a->p) {
        /* pre-index or signed offset */
        tcg_gen_addi_i64(addr, addr, a->imm);
    }

    tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
    tcg_rt = cpu_reg(s, a->rt);
    if (s->ata[0]) {
        gen_helper_ldg(tcg_rt, tcg_env, addr, tcg_rt);
    } else {
        /*
         * Tag access disabled: we must check for aborts on the load
         * from [rn+offset], and then insert a 0 tag into rt.
         */
        clean_addr = clean_data_tbi(s, addr);
        gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
        gen_address_with_allocation_tag0(tcg_rt, tcg_rt);
    }

    if (a->w) {
        /* pre-index or post-index */
        if (a->p) {
            /* post-index */
            tcg_gen_addi_i64(addr, addr, a->imm);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
    }
    return true;
}
static bool do_STG(DisasContext *s, arg_ldst_tag *a, bool is_zero, bool is_pair)
{
    TCGv_i64 addr, tcg_rt;

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    if (!a->p) {
        /* pre-index or signed offset */
        tcg_gen_addi_i64(addr, addr, a->imm);
    }
    tcg_rt = cpu_reg_sp(s, a->rt);
    if (!s->ata[0]) {
        /*
         * For STG and ST2G, we need to check alignment and probe memory.
         * TODO: For STZG and STZ2G, we could rely on the stores below,
         * at least for system mode; user-only won't enforce alignment.
         */
        if (is_pair) {
            gen_helper_st2g_stub(tcg_env, addr);
        } else {
            gen_helper_stg_stub(tcg_env, addr);
        }
    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        if (is_pair) {
            gen_helper_st2g_parallel(tcg_env, addr, tcg_rt);
        } else {
            gen_helper_stg_parallel(tcg_env, addr, tcg_rt);
        }
    } else {
        if (is_pair) {
            gen_helper_st2g(tcg_env, addr, tcg_rt);
        } else {
            gen_helper_stg(tcg_env, addr, tcg_rt);
        }
    }

    if (is_zero) {
        TCGv_i64 clean_addr = clean_data_tbi(s, addr);
        TCGv_i64 zero64 = tcg_constant_i64(0);
        TCGv_i128 zero128 = tcg_temp_new_i128();
        int mem_index = get_mem_index(s);
        MemOp mop = finalize_memop(s, MO_128 | MO_ALIGN);

        tcg_gen_concat_i64_i128(zero128, zero64, zero64);

        /* This is 1 or 2 atomic 16-byte operations. */
        tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
        if (is_pair) {
            tcg_gen_addi_i64(clean_addr, clean_addr, 16);
            tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
        }
    }

    if (a->w) {
        /* pre-index or post-index */
        if (a->p) {
            /* post-index */
            tcg_gen_addi_i64(addr, addr, a->imm);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
    }
    return true;
}

TRANS_FEAT(STG, aa64_mte_insn_reg, do_STG, a, false, false)
TRANS_FEAT(STZG, aa64_mte_insn_reg, do_STG, a, true, false)
TRANS_FEAT(ST2G, aa64_mte_insn_reg, do_STG, a, false, true)
TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true)
typedef void SetFn(TCGv_env, TCGv_i32, TCGv_i32);

static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue,
                   bool is_setg, SetFn fn)
{
    int memidx;
    uint32_t syndrome, desc = 0;

    if (is_setg && !dc_isar_feature(aa64_mte, s)) {
        return false;
    }

    /*
     * UNPREDICTABLE cases: we choose to UNDEF, which allows
     * us to pull this check before the CheckMOPSEnabled() test
     * (which we do in the helper function)
     */
    if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd ||
        a->rd == 31 || a->rn == 31) {
        return false;
    }

    memidx = get_a64_user_mem_index(s, a->unpriv);

    /*
     * We pass option_a == true, matching our implementation;
     * we pass wrong_option == false: helper function may set that bit.
     */
    syndrome = syn_mop(true, is_setg, (a->nontemp << 1) | a->unpriv,
                       is_epilogue, false, true, a->rd, a->rs, a->rn);

    if (is_setg ? s->ata[a->unpriv] : s->mte_active[a->unpriv]) {
        /* We may need to do MTE tag checking, so assemble the descriptor */
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, true);
        /* SIZEM1 and ALIGN we leave 0 (byte write) */
    }
    /* The helper function always needs the memidx even with MTE disabled */
    desc = FIELD_DP32(desc, MTEDESC, MIDX, memidx);

    /*
     * The helper needs the register numbers, but since they're in
     * the syndrome anyway, we let it extract them from there rather
     * than passing in an extra three integer arguments.
     */
    fn(tcg_env, tcg_constant_i32(syndrome), tcg_constant_i32(desc));
    return true;
}

TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, false, gen_helper_setp)
TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, false, gen_helper_setm)
TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, false, gen_helper_sete)
TRANS_FEAT(SETGP, aa64_mops, do_SET, a, false, true, gen_helper_setgp)
TRANS_FEAT(SETGM, aa64_mops, do_SET, a, false, true, gen_helper_setgm)
TRANS_FEAT(SETGE, aa64_mops, do_SET, a, true, true, gen_helper_setge)
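/*
 * Guest code normally issues the three insns back to back; e.g. a
 * memset() lowered to FEAT_MOPS looks roughly like (sketch, register
 * assignment arbitrary):
 *     setp  [x0]!, x2!, x1
 *     setm  [x0]!, x2!, x1
 *     sete  [x0]!, x2!, x1
 * where the prologue/main/epilogue split lets an implementation perform
 * any portion of the work in each step.
 */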
typedef void CpyFn(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32);

static bool do_CPY(DisasContext *s, arg_cpy *a, bool is_epilogue, CpyFn fn)
{
    int rmemidx, wmemidx;
    uint32_t syndrome, rdesc = 0, wdesc = 0;
    bool wunpriv = extract32(a->options, 0, 1);
    bool runpriv = extract32(a->options, 1, 1);

    /*
     * UNPREDICTABLE cases: we choose to UNDEF, which allows
     * us to pull this check before the CheckMOPSEnabled() test
     * (which we do in the helper function)
     */
    if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd ||
        a->rd == 31 || a->rs == 31 || a->rn == 31) {
        return false;
    }

    rmemidx = get_a64_user_mem_index(s, runpriv);
    wmemidx = get_a64_user_mem_index(s, wunpriv);

    /*
     * We pass option_a == true, matching our implementation;
     * we pass wrong_option == false: helper function may set that bit.
     */
    syndrome = syn_mop(false, false, a->options, is_epilogue,
                       false, true, a->rd, a->rs, a->rn);

    /* If we need to do MTE tag checking, assemble the descriptors */
    if (s->mte_active[runpriv]) {
        rdesc = FIELD_DP32(rdesc, MTEDESC, TBI, s->tbid);
        rdesc = FIELD_DP32(rdesc, MTEDESC, TCMA, s->tcma);
    }
    if (s->mte_active[wunpriv]) {
        wdesc = FIELD_DP32(wdesc, MTEDESC, TBI, s->tbid);
        wdesc = FIELD_DP32(wdesc, MTEDESC, TCMA, s->tcma);
        wdesc = FIELD_DP32(wdesc, MTEDESC, WRITE, true);
    }
    /* The helper function needs these parts of the descriptor regardless */
    rdesc = FIELD_DP32(rdesc, MTEDESC, MIDX, rmemidx);
    wdesc = FIELD_DP32(wdesc, MTEDESC, MIDX, wmemidx);

    /*
     * The helper needs the register numbers, but since they're in
     * the syndrome anyway, we let it extract them from there rather
     * than passing in an extra three integer arguments.
     */
    fn(tcg_env, tcg_constant_i32(syndrome), tcg_constant_i32(wdesc),
       tcg_constant_i32(rdesc));
    return true;
}

TRANS_FEAT(CPYP, aa64_mops, do_CPY, a, false, gen_helper_cpyp)
TRANS_FEAT(CPYM, aa64_mops, do_CPY, a, false, gen_helper_cpym)
TRANS_FEAT(CPYE, aa64_mops, do_CPY, a, true, gen_helper_cpye)
TRANS_FEAT(CPYFP, aa64_mops, do_CPY, a, false, gen_helper_cpyfp)
TRANS_FEAT(CPYFM, aa64_mops, do_CPY, a, false, gen_helper_cpyfm)
TRANS_FEAT(CPYFE, aa64_mops, do_CPY, a, true, gen_helper_cpyfe)
typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);

static bool gen_rri(DisasContext *s, arg_rri_sf *a,
                    bool rd_sp, bool rn_sp, ArithTwoOp *fn)
{
    TCGv_i64 tcg_rn = rn_sp ? cpu_reg_sp(s, a->rn) : cpu_reg(s, a->rn);
    TCGv_i64 tcg_rd = rd_sp ? cpu_reg_sp(s, a->rd) : cpu_reg(s, a->rd);
    TCGv_i64 tcg_imm = tcg_constant_i64(a->imm);

    fn(tcg_rd, tcg_rn, tcg_imm);
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}

/*
 * PC-rel. addressing
 */

static bool trans_ADR(DisasContext *s, arg_ri *a)
{
    gen_pc_plus_diff(s, cpu_reg(s, a->rd), a->imm);
    return true;
}

static bool trans_ADRP(DisasContext *s, arg_ri *a)
{
    int64_t offset = (int64_t)a->imm << 12;

    /* The page offset is ok for CF_PCREL. */
    offset -= s->pc_curr & 0xfff;
    gen_pc_plus_diff(s, cpu_reg(s, a->rd), offset);
    return true;
}
/*
 * Add/subtract (immediate)
 */
TRANS(ADD_i, gen_rri, a, 1, 1, tcg_gen_add_i64)
TRANS(SUB_i, gen_rri, a, 1, 1, tcg_gen_sub_i64)
TRANS(ADDS_i, gen_rri, a, 0, 1, a->sf ? gen_add64_CC : gen_add32_CC)
TRANS(SUBS_i, gen_rri, a, 0, 1, a->sf ? gen_sub64_CC : gen_sub32_CC)

/*
 * Add/subtract (immediate, with tags)
 */

static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a,
                                      bool sub_op)
{
    TCGv_i64 tcg_rn, tcg_rd;
    int imm;

    imm = a->uimm6 << LOG2_TAG_GRANULE;
    if (sub_op) {
        imm = -imm;
    }

    tcg_rn = cpu_reg_sp(s, a->rn);
    tcg_rd = cpu_reg_sp(s, a->rd);

    if (s->ata[0]) {
        gen_helper_addsubg(tcg_rd, tcg_env, tcg_rn,
                           tcg_constant_i32(imm),
                           tcg_constant_i32(a->uimm4));
    } else {
        tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
        gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
    }
    return true;
}

TRANS_FEAT(ADDG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, false)
TRANS_FEAT(SUBG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, true)
4249 /* The input should be a value in the bottom e bits (with higher
4250 * bits zero); returns that value replicated into every element
4251 * of size e in a 64 bit integer.
4253 static uint64_t bitfield_replicate(uint64_t mask
, unsigned int e
)
/*
 * Logical (immediate)
 */

/*
 * Simplified variant of pseudocode DecodeBitMasks() for the case where we
 * only require the wmask. Returns false if the imms/immr/immn are a reserved
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each. Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 0x11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones. */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = MAKE_64BIT_MASK(0, s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= MAKE_64BIT_MASK(0, e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}

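/*
 * For example, immn == 0, imms == 0b111100, immr == 0 selects e == 2 with
 * a run of one set bit per element, giving the familiar pattern
 * 0x5555555555555555 (masked down to 0x55555555 by the caller for !sf).
 */
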
static bool gen_rri_log(DisasContext *s, arg_rri_log *a, bool set_cc,
                        void (*fn)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t imm;

    /* Some immediate field values are reserved. */
    if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
                                extract32(a->dbm, 0, 6),
                                extract32(a->dbm, 6, 6))) {
        return false;
    }
    if (!a->sf) {
        imm &= 0xffffffffull;
    }

    tcg_rd = set_cc ? cpu_reg(s, a->rd) : cpu_reg_sp(s, a->rd);
    tcg_rn = cpu_reg(s, a->rn);

    fn(tcg_rd, tcg_rn, imm);
    if (set_cc) {
        gen_logic_CC(a->sf, tcg_rd);
    }
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}

TRANS(AND_i, gen_rri_log, a, false, tcg_gen_andi_i64)
TRANS(ORR_i, gen_rri_log, a, false, tcg_gen_ori_i64)
TRANS(EOR_i, gen_rri_log, a, false, tcg_gen_xori_i64)
TRANS(ANDS_i, gen_rri_log, a, true, tcg_gen_andi_i64)

/*
 * Move wide (immediate)
 */

static bool trans_MOVZ(DisasContext *s, arg_movw *a)
{
    int pos = a->hw << 4;
    tcg_gen_movi_i64(cpu_reg(s, a->rd), (uint64_t)a->imm << pos);
    return true;
}

static bool trans_MOVN(DisasContext *s, arg_movw *a)
{
    int pos = a->hw << 4;
    uint64_t imm = a->imm;

    imm = ~(imm << pos);
    if (!a->sf) {
        imm = (uint32_t)imm;
    }
    tcg_gen_movi_i64(cpu_reg(s, a->rd), imm);
    return true;
}

static bool trans_MOVK(DisasContext *s, arg_movw *a)
{
    int pos = a->hw << 4;
    TCGv_i64 tcg_rd, tcg_im;

    tcg_rd = cpu_reg(s, a->rd);
    tcg_im = tcg_constant_i64(a->imm);
    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_im, pos, 16);
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}

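/*
 * Together these implement the usual MOV-immediate idiom: e.g.
 * MOVZ X0, #0x5678 followed by MOVK X0, #0x1234, LSL #16 leaves
 * 0x12345678 in X0, since MOVZ replaces the whole register while MOVK
 * deposits 16 bits at the given position and preserves the rest.
 */
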
static bool trans_SBFM(DisasContext *s, arg_SBFM *a)
{
    TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
    TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
    unsigned int bitsize = a->sf ? 64 : 32;
    unsigned int ri = a->immr;
    unsigned int si = a->imms;
    unsigned int pos, len;

    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
        if (!a->sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);

        if (len < ri) {
            /*
             * Sign extend the destination field from len to fill the
             * balance of the word. Let the deposit below insert all
             * of those sign bits.
             */
            tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
            len = ri;
        }

        /*
         * We start with zero, and we haven't modified any bits outside
         * bitsize, therefore no final zero-extension is needed for !sf.
         */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
    }
    return true;
}

static bool trans_UBFM(DisasContext *s, arg_UBFM *a)
{
    TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
    TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
    unsigned int bitsize = a->sf ? 64 : 32;
    unsigned int ri = a->immr;
    unsigned int si = a->imms;
    unsigned int pos, len;

    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
    }
    return true;
}

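/*
 * The two arms above correspond to the UBFX and LSL/UBFIZ aliases: e.g.
 * LSL X0, X1, #4 is UBFM X0, X1, #60, #59, which takes the si < ri path
 * with pos == 4 and len == 60, i.e. a left shift by 4 with zero fill.
 */
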
static bool trans_BFM(DisasContext *s, arg_BFM *a)
{
    TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
    TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
    unsigned int bitsize = a->sf ? 64 : 32;
    unsigned int ri = a->immr;
    unsigned int si = a->imms;
    unsigned int pos, len;

    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        len = (si - ri) + 1;
        pos = 0;
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}

static bool trans_EXTR(DisasContext *s, arg_extract *a)
{
    TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

    tcg_rd = cpu_reg(s, a->rd);

    if (unlikely(a->imm == 0)) {
        /*
         * tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
         * so an extract from bit 0 is a special case.
         */
        if (a->sf) {
            tcg_gen_mov_i64(tcg_rd, cpu_reg(s, a->rm));
        } else {
            tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, a->rm));
        }
    } else {
        tcg_rm = cpu_reg(s, a->rm);
        tcg_rn = cpu_reg(s, a->rn);

        if (a->sf) {
            /* Specialization to ROR happens in EXTRACT2. */
            tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, a->imm);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();

            tcg_gen_extrl_i64_i32(t0, tcg_rm);
            if (a->rm == a->rn) {
                tcg_gen_rotri_i32(t0, t0, a->imm);
            } else {
                TCGv_i32 t1 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(t1, tcg_rn);
                tcg_gen_extract2_i32(t0, t0, t1, a->imm);
            }
            tcg_gen_extu_i32_i64(tcg_rd, t0);
        }
    }
    return true;
}

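/*
 * For example, EXTR X0, X1, X2, #8 returns the low 64 bits of the 128-bit
 * value X1:X2 shifted right by 8, so the low 8 bits of X1 become the top
 * 8 bits of the result; with Rn == Rm the operation degenerates to a
 * rotate, which is how the ROR-immediate alias reaches this function.
 */
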
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t0, src);
            tcg_gen_extrl_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}

/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
    }
}

/* Logical (shifted register)
 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE);
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}

/*
 * Add/subtract (extended register)
 *
 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * opt: 00
 * option: extension type (see DecodeRegExtend)
 * imm3: optional shift to Rm
 *
 * Rd = Rn + LSL(extend(Rm), amount)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4 || opt != 0) {
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }
}

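/*
 * For example, ADD X0, SP, W1, UXTW #2 takes this path: tcg_rm holds W1
 * zero-extended to 64 bits and shifted left by 2 (via ext_and_shift_reg),
 * while tcg_rn reads SP, since the non-flag-setting form allows SP as Rn.
 */
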
/*
 * Add/subtract (shifted register)
 *
 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
 *  imm6: Shift amount to apply to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }
}

/* Data-processing (3 source)
 *
 *    31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
 *  +--+------+-----------+------+------+----+------+------+------+
 *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 *  +--+------+-----------+------+------+----+------+------+------+
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1, tcg_op2, tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }
}

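/*
 * The is_high path implements SMULH/UMULH: tcg_gen_muls2/mulu2 produce the
 * full 128-bit product of two 64-bit registers in two halves and only the
 * high half is kept, e.g. UMULH of 0xffffffffffffffff with itself yields
 * 0xfffffffffffffffe.
 */
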
/* Add/subtract (with carry)
 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9    5 4   0
 * +--+--+--+------------------------+------+-------------+------+-----+
 * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | 0 0 0 0 0 0 |  Rn  |  Rd |
 * +--+--+--+------------------------+------+-------------+------+-----+
 */

static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        tcg_y = tcg_temp_new_i64();
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}

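/*
 * This relies on the identity Rn - Rm - !C == Rn + ~Rm + C: SBC/SBCS only
 * need the second operand inverted (the tcg_gen_not_i64 above) and can
 * then share the ADC/ADCS generators.
 */
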
/*
 * Rotate right into flags
 *  31 30 29                21       15          10      5  4      0
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1 |  Rn  |o2| mask |
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 */
static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
{
    int mask = extract32(insn, 0, 4);
    int o2 = extract32(insn, 4, 1);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 15, 6);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i64 tcg_rn;
    TCGv_i32 nzcv;

    if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, 1);
    tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);

    nzcv = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(nzcv, tcg_rn);

    if (mask & 8) { /* N */
        tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
    }
    if (mask & 4) { /* Z */
        tcg_gen_not_i32(cpu_ZF, nzcv);
        tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
    }
    if (mask & 2) { /* C */
        tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
    }
    if (mask & 1) { /* V */
        tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
    }
}

/*
 * Evaluate into flags
 *  31 30 29                21        15   14        10      5  4      0
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 |  Rn  |o3| mask |
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 */
static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
{
    int o3_mask = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int o2 = extract32(insn, 15, 6);
    int sz = extract32(insn, 14, 1);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i32 tmp;
    int shift;

    if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
        !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }
    shift = sz ? 16 : 24;  /* SETF16 or SETF8 */

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
    tcg_gen_shli_i32(cpu_NF, tmp, shift);
    tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
}

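/*
 * For SETF8 (shift == 24): NF ends up with operand bit 7 in its sign bit,
 * ZF holds the same shifted value so Z reads as set iff bits 7:0 are all
 * zero, and VF's sign bit is bit 8 XOR bit 7 of the operand; C is left
 * unchanged, matching the architected flag behaviour.
 */
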
/* Conditional compare (immediate / register)
 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND. */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);

    /* Load the arguments for the new comparison. */
    if (is_imm) {
        tcg_y = tcg_temp_new_i64();
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison. */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }

    /* If COND was false, force the flags to #nzcv. Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, we can make do with just T1.
     * In either case, allow the tcg optimizer to delete any unused mask.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
}

/* Conditional select
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 * +----+----+---+-----------------+------+------+-----+------+------+
 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+---+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_constant_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET & CSETM. */
        if (else_inv) {
            tcg_gen_negsetcond_i64(tcg_invert_cond(c.cond),
                                   tcg_rd, c.value, zero);
        } else {
            tcg_gen_setcond_i64(tcg_invert_cond(c.cond),
                                tcg_rd, c.value, zero);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        if (else_inv && else_inc) {
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) {
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) {
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

/* REV with sf==1, opcode==3 ("REV64") */
static void handle_rev64(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    if (!sf) {
        unallocated_encoding(s);
        return;
    }
    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
}

/* REV with sf==0, opcode==2
 * REV32 (sf==1, opcode==2)
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
        tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
    } else {
        tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
    }
}

/* REV16 (opcode==1) */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
}

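/*
 * The mask trick above swaps the bytes within each halfword, e.g. for the
 * 32-bit case 0xaabbccdd becomes 0xbbaaddcc: even bytes are shifted up by
 * 8 while odd bytes are shifted down by 8.
 */
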
/* Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, opcode2, rn, rd;
    TCGv_i64 tcg_rd;

    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    opcode2 = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))

    switch (MAP(sf, opcode2, opcode)) {
    case MAP(0, 0x00, 0x00): /* RBIT */
    case MAP(1, 0x00, 0x00):
        handle_rbit(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x01): /* REV16 */
    case MAP(1, 0x00, 0x01):
        handle_rev16(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x02): /* REV/REV32 */
    case MAP(1, 0x00, 0x02):
        handle_rev32(s, sf, rn, rd);
        break;
    case MAP(1, 0x00, 0x03): /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x04): /* CLZ */
    case MAP(1, 0x00, 0x04):
        handle_clz(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x05): /* CLS */
    case MAP(1, 0x00, 0x05):
        handle_cls(s, sf, rn, rd);
        break;
    case MAP(1, 0x01, 0x00): /* PACIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x01): /* PACIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x02): /* PACDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x03): /* PACDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x04): /* AUTIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x05): /* AUTIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x06): /* AUTDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x07): /* AUTDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x08): /* PACIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x09): /* PACIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0a): /* PACDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0b): /* PACDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0c): /* AUTIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0d): /* AUTIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0e): /* AUTDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0f): /* AUTDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x10): /* XPACI */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpaci(tcg_rd, tcg_env, tcg_rd);
        }
        break;
    case MAP(1, 0x01, 0x11): /* XPACD */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpacd(tcg_rd, tcg_env, tcg_rd);
        }
        break;
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }

#undef MAP
}

static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        tcg_n = tcg_temp_new_i64();
        tcg_m = tcg_temp_new_i64();
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

/* LSLV, LSRV, ASRV, RORV */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
}

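/*
 * The AND with 63 (or 31) gives the architected modulo behaviour for
 * register-specified shifts, e.g. LSLV W0, W1, W2 with W2 == 33 shifts
 * by 33 & 31 == 1 rather than producing zero.
 */
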
/* CRC32[BHWX], CRC32C[BHWX] */
static void handle_crc32(DisasContext *s,
                         unsigned int sf, unsigned int sz, bool crc32c,
                         unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_acc, tcg_val;
    TCGv_i32 tcg_bytes;

    if (!dc_isar_feature(aa64_crc32, s)
        || (sf == 1 && sz != 3)
        || (sf == 0 && sz == 3)) {
        unallocated_encoding(s);
        return;
    }

    if (sz == 3) {
        tcg_val = cpu_reg(s, rm);
    } else {
        uint64_t mask;
        switch (sz) {
        case 0:
            mask = 0xFF;
            break;
        case 1:
            mask = 0xFFFF;
            break;
        case 2:
            mask = 0xFFFFFFFF;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_val = tcg_temp_new_i64();
        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
    }

    tcg_acc = cpu_reg(s, rn);
    tcg_bytes = tcg_constant_i32(1 << sz);

    if (crc32c) {
        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    } else {
        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    }
}

/* Data-processing (2 source)
 *   31   30  29 28             21 20  16 15    10 9    5 4    0
 * +----+---+---+-----------------+------+--------+------+------+
 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd, setflag;
    sf = extract32(insn, 31, 1);
    setflag = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (setflag && opcode != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SUBP(S) */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 tcg_n, tcg_m, tcg_d;

            tcg_n = read_cpu_reg_sp(s, rn, true);
            tcg_m = read_cpu_reg_sp(s, rm, true);
            tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
            tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
            tcg_d = cpu_reg(s, rd);

            if (setflag) {
                gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
            } else {
                tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
            }
        }
        break;
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 4: /* IRG */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        }
        if (s->ata[0]) {
            gen_helper_irg(cpu_reg_sp(s, rd), tcg_env,
                           cpu_reg_sp(s, rn), cpu_reg(s, rm));
        } else {
            gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
                                             cpu_reg_sp(s, rn));
        }
        break;
    case 5: /* GMI */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 t = tcg_temp_new_i64();

            tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4);
            tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
            tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t);
        }
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 12: /* PACGA */
        if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        gen_helper_pacga(cpu_reg(s, rd), tcg_env,
                         cpu_reg(s, rn), cpu_reg_sp(s, rm));
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 */
    {
        int sz = extract32(opcode, 0, 2);
        bool crc32c = extract32(opcode, 2, 1);
        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
        break;
    }
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}

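/*
 * In the SUBP case above, the sign-extraction from bit 55 strips the MTE
 * tag byte before subtracting: e.g. X1 == 0x0a00000000001000 and
 * X2 == 0x0b00000000001000 differ only in the tag byte, so
 * SUBP X0, X1, X2 yields 0.
 */
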
/*
 * Data processing - register
 *  31  30 29  28      25  21  20  16      10         0
 * +--+---+--+---+-------+-----+-------+-------+---------+
 * |  |op0|  |op1| 1 0 1 | op2 |       |  op3  |         |
 * +--+---+--+---+-------+-----+-------+-------+---------+
 */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 30, 1);
    int op1 = extract32(insn, 28, 1);
    int op2 = extract32(insn, 21, 4);
    int op3 = extract32(insn, 10, 6);

    if (!op1) {
        if (op2 & 8) {
            if (op2 & 1) {
                /* Add/sub (extended register) */
                disas_add_sub_ext_reg(s, insn);
            } else {
                /* Add/sub (shifted register) */
                disas_add_sub_reg(s, insn);
            }
        } else {
            /* Logical (shifted register) */
            disas_logic_reg(s, insn);
        }
        return;
    }

    switch (op2) {
    case 0x0:
        switch (op3) {
        case 0x00: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;

        case 0x01: /* Rotate right into flags */
        case 0x21:
            disas_rotate_right_into_flags(s, insn);
            break;

        case 0x02: /* Evaluate into flags */
        case 0x12:
        case 0x22:
        case 0x32:
            disas_evaluate_into_flags(s, insn);
            break;

        default:
            goto do_unallocated;
        }
        break;

    case 0x2: /* Conditional compare */
        disas_cc(s, insn); /* both imm and reg forms */
        break;

    case 0x4: /* Conditional select */
        disas_cond_select(s, insn);
        break;

    case 0x6: /* Data-processing */
        if (op0) {    /* (1 source) */
            disas_data_proc_1src(s, insn);
        } else {      /* (2 source) */
            disas_data_proc_2src(s, insn);
        }
        break;
    case 0x8 ... 0xf: /* (3 source) */
        disas_data_proc_3src(s, insn);
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}

static void handle_fp_compare(DisasContext *s, int size,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (size == MO_64) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_constant_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
    } else {
        TCGv_i32 tcg_vn = tcg_temp_new_i32();
        TCGv_i32 tcg_vm = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_vn, rn, 0, size);
        if (cmp_with_zero) {
            tcg_gen_movi_i32(tcg_vm, 0);
        } else {
            read_vec_element_i32(s, tcg_vm, rm, 0, size);
        }

        switch (size) {
        case MO_32:
            if (signal_all_nans) {
                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        case MO_16:
            if (signal_all_nans) {
                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    gen_set_nzcv(tcg_flags);
}

/* Floating point compare
 *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    if (mos || op || op2r) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
}

/* Floating point conditional compare
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGLabel *label_continue = NULL;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cond < 0x0e) { /* not always */
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        gen_set_nzcv(tcg_constant_i64(nzcv << 28));
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, size, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}

/* Floating point conditional select
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 1 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false;
    DisasCompare64 c;
    MemOp sz;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Zero extend sreg & hreg inputs to 64 bits now. */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, sz);
    read_vec_element(s, t_false, rm, 0, sz);

    a64_test_cc(&c, cond);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
                        t_true, t_false);

    /* Note that sregs & hregs write back zeros to the high bits,
       and we've already done the zero-extension. */
    write_fp_dreg(s, rd, t_true);
}

/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
        break;
    case 0x2: /* FNEG */
        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
        break;
    case 0x3: /* FSQRT */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode;

        fpst = fpstatus_ptr(FPST_FPCR_F16);
        tcg_rmode = gen_set_rmode(opcode & 7, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
        break;
    }
    case 0xe: /* FRINTX */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
    TCGv_i32 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;

    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        goto done;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env);
        goto done;
    case 0x6: /* BFCVT */
        gen_fpst = gen_helper_bfcvt;
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = opcode & 7;
        gen_fpst = gen_helper_rints;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rints_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rints;
        break;
    case 0x10: /* FRINT32Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x12: /* FRINT64Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint64_s;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_s;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }

 done:
    write_fp_sreg(s, rd, tcg_res);
}

/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
    TCGv_i64 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;

    switch (opcode) {
    case 0x0: /* FMOV */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, tcg_env);
        goto done;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = opcode & 7;
        gen_fpst = gen_helper_rintd;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rintd_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rintd;
        break;
    case 0x10: /* FRINT32Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x12: /* FRINT64Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint64_d;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_d;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }

 done:
    write_fp_dreg(s, rd, tcg_res);
}

static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, tcg_env);
            write_fp_dreg(s, rd, tcg_rd);
        } else {
            /* Single to half */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            TCGv_i32 ahp = get_ahp_flag();
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
            write_fp_sreg(s, rd, tcg_rd);
        }
        break;
    }
    case 0x1:
    {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, tcg_env);
        } else {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            TCGv_i32 ahp = get_ahp_flag();
            /* Double to half */
            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
        }
        write_fp_sreg(s, rd, tcg_rd);
        break;
    }
    case 0x3:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 tcg_ahp = get_ahp_flag();
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_sreg(s, rd, tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_dreg(s, rd, tcg_rd);
        }
        break;
    }
    default:
        g_assert_not_reached();
    }
}

/* Floating point data-processing (1 source)
 *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (mos) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }

    case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
        if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
            goto do_unallocated;
        }
        /* fall through */
    case 0x0 ... 0x3:
    case 0x8 ... 0xc:
    case 0xe ... 0xf:
        /* 32-to-32 and 64-to-64 ops */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    case 0x6:
        switch (type) {
        case 1: /* BFCVT */
            if (!dc_isar_feature(aa64_bf16, s)) {
                goto do_unallocated;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}

/* Floating-point data-processing (2 source) - single precision */
static void handle_fp_2src_single(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negs(tcg_res, tcg_res);
        break;
    }

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating-point data-processing (2 source) - double precision */
static void handle_fp_2src_double(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i64();
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negd(tcg_res, tcg_res);
        break;
    }

    write_fp_dreg(s, rd, tcg_res);
}

/* Floating-point data-processing (2 source) - half precision */
static void handle_fp_2src_half(DisasContext *s, int opcode,
                                int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating point data-processing (2 source)
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_fp_2src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int opcode = extract32(insn, 12, 4);

    if (opcode > 8 || mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_single(s, opcode, rd, rn, rm);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_double(s, opcode, rd, rn, rm);
        break;
    case 3:
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_half(s, opcode, rd, rn, rm);
        break;
    default:
        unallocated_encoding(s);
    }
}

/* Floating-point data-processing (3 source) - single precision */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negs(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating-point data-processing (3 source) - double precision */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);
}

/* Floating-point data-processing (3 source) - half precision */
static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
                                int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_op3 = read_fp_hreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
    }

    if (o0 != o1) {
        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
    }

    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating point data-processing (3 source)
 *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 */
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    bool o0 = extract32(insn, 15, 1);
    bool o1 = extract32(insn, 21, 1);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
        break;
    case 3:
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
        break;
    default:
        unallocated_encoding(s);
    }
}

6684 /* Floating point immediate
6685 * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
6686 * +---+---+---+-----------+------+---+------------+-------+------+------+
6687 * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
6688 * +---+---+---+-----------+------+---+------------+-------+------+------+
6690 static void disas_fp_imm(DisasContext
*s
, uint32_t insn
)
6692 int rd
= extract32(insn
, 0, 5);
6693 int imm5
= extract32(insn
, 5, 5);
6694 int imm8
= extract32(insn
, 13, 8);
6695 int type
= extract32(insn
, 22, 2);
6696 int mos
= extract32(insn
, 29, 3);
6701 unallocated_encoding(s
);
6714 if (dc_isar_feature(aa64_fp16
, s
)) {
6719 unallocated_encoding(s
);
6723 if (!fp_access_check(s
)) {
6727 imm
= vfp_expand_imm(sz
, imm8
);
6728 write_fp_dreg(s
, rd
, tcg_constant_i64(imm
));
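
/*
 * Worked example (illustrative, not part of the original source):
 * vfp_expand_imm() expands imm8 = abcdefgh into sign = a, an exponent
 * built from NOT(b) followed by replicated b and the cd bits, and a
 * fraction of efgh followed by zeros. So imm8 = 0x70 should expand to
 * 1.0 (0x3ff0000000000000 for MO_64) and imm8 = 0xf0 to -1.0.
 */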
/* Handle floating point <=> fixed point conversions. Note that we can
 * also deal with fp <=> integer conversions as a special case (scale == 64)
 * OPTME: consider handling that special case specially or at least skipping
 * the call to scalbn in the helpers for zero shifts.
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);

    tcg_shift = tcg_constant_i32(64 - scale);

    if (itof) {
        TCGv_i64 tcg_int = cpu_reg(s, rn);

        if (!sf) {
            TCGv_i64 tcg_extend = tcg_temp_new_i64();

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* float64 */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so FCVTA[US] is a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* float64 */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            break;

        case 0: /* float32 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
            }
            break;

        case 3: /* float16 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
            }
            break;

        default:
            g_assert_not_reached();
        }

        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
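
/*
 * Illustrative note (not part of the original source): the helpers take
 * the number of fraction bits, which is 64 - scale here. A plain
 * FP <-> integer conversion is the scale == 64 special case (zero
 * fraction bits); e.g. FCVTZS with 16 fraction bits is encoded with
 * scale = 48, giving tcg_shift = 16.
 */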
/* Floating point <-> fixed point conversions
 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    if (sbit || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* float32 */
    case 1: /* float64 */
        break;
    case 3: /* float16 */
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}

static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion.
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half. */
            tcg_gen_st_i64(tcg_rn, tcg_env, fp_reg_hi_offset(s, rd));
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half */
            tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
{
    TCGv_i64 t = read_fp_dreg(s, rn);
    TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);

    gen_helper_fjcvtzs(t, t, fpstatus);

    tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
    tcg_gen_extrh_i64_i32(cpu_ZF, t);
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
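
/*
 * Illustrative note (not part of the original source): per the FJCVTZS
 * definition the flags end up as N=0, C=0, V=0, with Z reflecting
 * whether the conversion was exact and in range; e.g. converting 2.0
 * should yield 2 with Z set, while a NaN yields 0 with Z clear. The
 * helper returns the Z information in its high half, which is why it
 * is extracted into cpu_ZF above.
 */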
/* Floating point <-> integer conversions
 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof = false;

    if (sbit) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 2: /* SCVTF */
    case 3: /* UCVTF */
        itof = true;
        /* fallthru */
    case 4: /* FCVTAS */
    case 5: /* FCVTAU */
        if (rmode != 0) {
            goto do_unallocated;
        }
        /* fallthru */
    case 0: /* FCVT[NPMZ]S */
    case 1: /* FCVT[NPMZ]U */
        switch (type) {
        case 0: /* float32 */
        case 1: /* float64 */
            break;
        case 3: /* float16 */
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            break;
        default:
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
        break;

    default:
        switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
        case 0b01100110: /* FMOV half <-> 32-bit int */
        case 0b01100111:
        case 0b11100110: /* FMOV half <-> 64-bit int */
        case 0b11100111:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            /* fallthru */
        case 0b00000110: /* FMOV 32-bit */
        case 0b00000111:
        case 0b10100110: /* FMOV 64-bit */
        case 0b10100111:
        case 0b11001110: /* FMOV top half of 128-bit */
        case 0b11001111:
            if (!fp_access_check(s)) {
                return;
            }
            itof = opcode & 1;
            handle_fmov(s, rd, rn, type, itof);
            break;

        case 0b00111110: /* FJCVTZS */
            if (!dc_isar_feature(aa64_jscvt, s)) {
                goto do_unallocated;
            } else if (fp_access_check(s)) {
                handle_fjcvtzs(s, rd, rn);
            }
            break;

        default:
        do_unallocated:
            unallocated_encoding(s);
            return;
        }
        break;
    }
}

/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
 *   31  30  29 28     25 24                          0
 * +---+---+---+---------+-----------------------------+
 * |   | 0 |   | 1 1 1 1 |                             |
 * +---+---+---+---------+-----------------------------+
 */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        /* Floating point data-processing (3 source) */
        disas_fp_3src(s, insn);
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            disas_fp_2src(s, insn);
            break;
        case 3:
            /* Floating point conditional select */
            disas_fp_csel(s, insn);
            break;
        case 0:
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}
static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
                     int pos)
{
    /* Extract 64 bits from the middle of two concatenated 64 bit
     * vector register slices left:right. The extracted bits start
     * at 'pos' bits into the right (least significant) side.
     * We return the result in tcg_right, and guarantee not to
     * trash tcg_left.
     */
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    assert(pos > 0 && pos < 64);

    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
}
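
/*
 * Worked example (illustrative, not part of the original source): with
 * pos == 8 the sequence above computes
 *   tcg_right = (right >> 8) | (left << 56)
 * i.e. the low 64 bits of the 128-bit value left:right shifted right
 * by one byte, which is exactly what EXT needs per 64-bit slice.
 */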
/* EXT
 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3; /* rightmost position as bits */
    TCGv_i64 tcg_resl, tcg_resh;

    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Vd gets bits starting at pos bits into Vm:Vn. This is
     * either extracting 128 bits from a 128:128 concatenation, or
     * extracting 64 bits from a 64:64 concatenation.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        if (pos >= 64) {
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
    }
    clear_vec_high(s, is_q, rd);
}

/* TBL/TBX
 *   31  30 29         24 23 22  21 20  16 15  14 13  12  11 10 9    5 4    0
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tbx = extract32(insn, 12, 1);
    int len = (extract32(insn, 13, 2) + 1) * 16;

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rm), tcg_env,
                       is_q ? 16 : 8, vec_full_reg_size(s),
                       (len << 6) | (is_tbx << 5) | rn,
                       gen_helper_simd_tblx);
}

/* ZIP/UZP/TRN
 *   31  30 29         24 23  22  21 20  16 15 14 12 11 10 9    5 4    0
 * +---+---+-------------+------+---+------+---+-----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
 * +---+---+-------------+------+---+------+---+-----+-----+------+------+
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
     * bit 2 indicates 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res[2], tcg_ele;

    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL;
    tcg_ele = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        int o, w;

        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_ele, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_ele, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_ele, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_ele, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        w = (i * esize) / 64;
        o = (i * esize) % 64;
        if (o == 0) {
            tcg_gen_mov_i64(tcg_res[w], tcg_ele);
        } else {
            tcg_gen_shli_i64(tcg_ele, tcg_ele, o);
            tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele);
        }
    }

    for (i = 0; i <= is_q; ++i) {
        write_vec_element(s, tcg_res[i], rd, i, MO_64);
    }
    clear_vec_high(s, is_q, rd);
}
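
/*
 * Worked example (illustrative, not part of the original source):
 * ZIP1 Vd.8B, Vn.8B, Vm.8B (opcode 3, part 0) interleaves from the low
 * halves, so the loop above produces
 *   Vd = { n[0], m[0], n[1], m[1], n[2], m[2], n[3], m[3] }
 * with even iterations reading Vn and odd iterations reading Vm.
 */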
/*
 * do_reduction_op helper
 *
 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
 * important for correct NaN propagation that we do these
 * operations in exactly the order specified by the pseudocode.
 *
 * This is a recursive function, TCG temps should be freed by the
 * calling function once it is done with the values.
 */
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        int element;
        TCGv_i32 tcg_elem;
        MemOp msize = esize == 16 ? MO_16 : MO_32;

        /* We should have one register left here */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }
        return tcg_res;
    }
}
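
/*
 * Worked example (illustrative, not part of the original source): for
 * a 4-element FMAXV, vmap starts as 0b1111 and is split into
 * 0b0011/0b1100, so the recursion computes
 *   max(max(e0, e1), max(e2, e3))
 * which is the exact pairwise tree the Reduce() pseudocode requires
 * for NaN propagation.
 */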
/* AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of size field encodes min vs max and the actual size
         * depends on the encoding of the U bit. If not set (and FP16
         * enabled) then we do half-precision float instead of single
         * precision.
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && dc_isar_feature(aa64_fp16, s)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector
     * to produce a single result. We can guarantee that a 64
     * bit intermediate is sufficient:
     * + for [US]ADDLV the maximum element size is 32 bits, and
     *   the result type is 64 bits
     * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
     *   same as the element size, which is 32 bits at most
     * For the integer operations we can choose to work at 64
     * or 32 bits and truncate at the end; for simplicity
     * we use 64 bits always. The floating point
     * ops do require 32 bit intermediates, though.
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
    } else {
        /* Floating point vector reduction ops which work across 32
         * bit (single) or 16 bit (half-precision) intermediates.
         * Note that correct NaN propagation requires that we do these
         * operations in exactly the order specified by the pseudocode.
         */
        TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
    }

    /* Now truncate the result to the width required for the final output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2*esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
}
/* DUP (Element, Vector)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    int index;

    if (size > 3 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);
    tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
                         vec_reg_offset(s, rn, index, size),
                         is_q ? 16 : 8, vec_full_reg_size(s));
}

/* DUP (element, scalar)
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 */
static void handle_simd_dupes(DisasContext *s, int rd, int rn,
                              int imm5)
{
    int size = ctz32(imm5);
    int index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);

    /* This instruction just extracts the specified element and
     * zero-extends it into the bottom of the destination register.
     */
    tmp = tcg_temp_new_i64();
    read_vec_element(s, tmp, rn, index, size);
    write_fp_dreg(s, rd, tmp);
}

/* DUP (General)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    uint32_t dofs, oprsz, maxsz;

    if (size > 3 || ((size == 3) && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dofs = vec_full_reg_offset(s, rd);
    oprsz = is_q ? 16 : 8;
    maxsz = vec_full_reg_size(s);

    tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
}

/* INS (Element)
 *
 *  31                   21 20    16 15  14    11  10 9    5 4    0
 * +-----------------------+--------+------------+---+------+------+
 * | 0 1 1 0 1 1 1 0 0 0 0 |  imm5  | 0 |  imm4  | 1 |  Rn  |  Rd  |
 * +-----------------------+--------+------------+---+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_inse(DisasContext *s, int rd, int rn,
                             int imm4, int imm5)
{
    int size = ctz32(imm5);
    int src_index, dst_index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dst_index = extract32(imm5, 1 + size, 5);
    src_index = extract32(imm4, size, 4);

    tmp = tcg_temp_new_i64();

    read_vec_element(s, tmp, rn, src_index, size);
    write_vec_element(s, tmp, rd, dst_index, size);

    /* INS is considered a 128-bit write for SVE. */
    clear_vec_high(s, true, rd);
}
/* INS (General)
 *
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 1 1 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
{
    int size = ctz32(imm5);
    int idx;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    idx = extract32(imm5, 1 + size, 4 - size);
    write_vec_element(s, cpu_reg(s, rn), rd, idx, size);

    /* INS is considered a 128-bit write for SVE. */
    clear_vec_high(s, true, rd);
}

/* SMOV, UMOV
 *
 *  31  30   29              21 20    16 15    12   10 9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 1 U 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * U: unsigned when set
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
                                  int rn, int rd, int imm5)
{
    int size = ctz32(imm5);
    int element;
    TCGv_i64 tcg_rd;

    /* Check for UnallocatedEncodings */
    if (is_signed) {
        if (size > 2 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        if (size > 3
            || (size < 3 && is_q)
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    element = extract32(imm5, 1 + size, 4);

    tcg_rd = cpu_reg(s, rd);
    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
    if (is_signed && !is_q) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

/* AdvSIMD copy
 *   31  30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +---+---+----+-----------------+------+---+------+---+------+------+
 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +---+---+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int op = extract32(insn, 29, 1);
    int is_q = extract32(insn, 30, 1);
    int imm5 = extract32(insn, 16, 5);

    if (op) {
        if (is_q) {
            /* INS (element) */
            handle_simd_inse(s, rd, rn, imm4, imm5);
        } else {
            unallocated_encoding(s);
        }
    } else {
        switch (imm4) {
        case 0:
            /* DUP (element - vector) */
            handle_simd_dupe(s, is_q, rd, rn, imm5);
            break;
        case 1:
            /* DUP (general) */
            handle_simd_dupg(s, is_q, rd, rn, imm5);
            break;
        case 3:
            if (is_q) {
                /* INS (general) */
                handle_simd_insg(s, rd, rn, imm5);
            } else {
                unallocated_encoding(s);
            }
            break;
        case 5:
        case 7:
            /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
            handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
    }
}
/* AdvSIMD modified immediate
 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 *
 * There are a number of operations that can be carried out here:
 *   MOVI - move (shifted) imm into register
 *   MVNI - move inverted (shifted) imm into register
 *   ORR  - bitwise OR of (shifted) imm with register
 *   BIC  - bitwise clear of (shifted) imm with register
 * With ARMv8.2 we also have:
 *   FMOV half-precision
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        /* Check for FMOV (vector, immediate) - half-precision */
        if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cmode == 15 && o2 && !is_neg) {
        /* FMOV (vector, immediate) - half-precision */
        imm = vfp_expand_imm(MO_16, abcdefgh);
        /* now duplicate across the lanes */
        imm = dup_const(MO_16, imm);
    } else {
        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
    }

    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
        /* MOVI or MVNI, with MVNI negation handled above. */
        tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
                             vec_full_reg_size(s), imm);
    } else {
        /* ORR or BIC, with BIC negation to AND handled above. */
        if (is_neg) {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
        } else {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
        }
    }
}

/* AdvSIMD scalar copy
 *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +-----+----+-----------------+------+---+------+---+------+------+
 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +-----+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int imm5 = extract32(insn, 16, 5);
    int op = extract32(insn, 29, 1);

    if (op != 0 || imm4 != 0) {
        unallocated_encoding(s);
        return;
    }

    /* DUP (element, scalar) */
    handle_simd_dupes(s, rd, rn, imm5);
}
/* AdvSIMD scalar pairwise
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but size[1] is always 1 for valid
     * encodings.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        fpst = NULL;
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op, size[0] is 32 or 64 bit */
        if (!u) {
            if (!dc_isar_feature(aa64_fp16, s)) {
                unallocated_encoding(s);
                return;
            } else {
                size = MO_16;
            }
        } else {
            size = extract32(size, 0, 1) ? MO_64 : MO_32;
        }

        if (!fp_access_check(s)) {
            return;
        }

        fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == MO_64) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, size);
        read_vec_element_i32(s, tcg_op2, rn, 1, size);

        if (size == MO_16) {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        write_fp_sreg(s, rd, tcg_res);
    }
}
/*
 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 *
 * This handles the common shifting code and is used by both
 * the vector and scalar code.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_constant_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_res */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }
}
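
/*
 * Worked example (illustrative, not part of the original source):
 * URSHR #2 with input 6 uses the rounding constant 1 << (2 - 1) = 2,
 * so the result is (6 + 2) >> 2 = 2, whereas the truncating USHR #2
 * would give 6 >> 2 = 1. The 128-bit path above exists because the
 * 64-bit add of the rounding constant can carry out of bit 63.
 */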
/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* shift count same as element size is valid but does nothing;
         * special case to avoid potential shift by 64.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);
}

/* SHL/SLI - Scalar shift left */
static void handle_scalar_simd_shli(DisasContext *s, bool insert,
                                    int immh, int immb, int opcode,
                                    int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
    } else {
        tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);
}
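
/*
 * Worked example (illustrative, not part of the original source):
 * shift = immhb - esize, so for a 64-bit SHL (immh = 0b1000) with
 * immb = 0b010, immhb = 66 and the shift count is 66 - 64 = 2. SLI
 * with the same encoding instead deposits bits [61:0] of Rn into
 * bits [63:2] of Rd, leaving the low bits of Rd untouched.
 */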
/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
 * (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_temp_new_i64();

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, tcg_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        if (i == 0) {
            tcg_gen_extract_i64(tcg_final, tcg_rd, 0, esize);
        } else {
            tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
        }
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    clear_vec_high(s, is_q, rd);
}
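
/*
 * Worked example (illustrative, not part of the original source):
 * SQSHRN narrowing 16-bit elements to 8 bits with shift #4:
 * 0x0123 >> 4 = 0x12 fits in int8 and is stored as-is, while
 * 0x7fff >> 4 = 0x7ff exceeds the int8 range and saturates to 0x7f,
 * with the narrowing helper setting FPSR.QC.
 */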
/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar && !is_q && extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* Since we use the variable-shift helpers we must
     * replicate the shift count into each element of
     * the tcg_shift value.
     */
    switch (size) {
    case 0:
        shift |= shift << 8;
        /* fall through */
    case 1:
        shift |= shift << 16;
        break;
    case 2:
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_shift = tcg_constant_i64(shift);
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, tcg_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_constant_i32(shift);
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        MemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, tcg_env, tcg_op, tcg_shift);
            if (scalar) {
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }
        }

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
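
/*
 * Illustrative note (not part of the original source): the saturation
 * direction depends on the source/destination signedness pair, e.g.
 * SQSHLU (signed source, unsigned destination) should clamp a negative
 * input to 0 and set FPSR.QC, while UQSHL clamps overflowing results
 * to all-ones.
 */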
/* Common vector code for handling integer to FP conversion */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    TCGv_i32 tcg_shift = NULL;

    MemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    if (fracbits || size == MO_64) {
        tcg_shift = tcg_constant_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }
    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }
    }

    clear_vec_high(s, elements << size == 16, rd);
}
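
/*
 * Worked example (illustrative, not part of the original source):
 * UCVTF with 8 fraction bits treats the integer 0x180 as 0x180 / 2^8,
 * so the result is 1.5; with fracbits == 0 the plain sitos/uitos-style
 * helpers are used and no scaling is applied.
 */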
/* UCVTF/SCVTF - Integer to FP conversion */
static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int opcode,
                                         int rn, int rd)
{
    int size, elements, fracbits;
    int immhb = immh << 3 | immb;

    if (immh & 8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 4) {
        size = MO_32;
    } else if (immh & 2) {
        size = MO_16;
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* immh == 0 would be a failure of the decode logic */
        g_assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (is_scalar) {
        elements = 1;
    } else {
        elements = (8 << is_q) >> size;
    }
    fracbits = (16 << size) - immhb;

    if (!fp_access_check(s)) {
        return;
    }

    handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
}

/* FCVTZS, FCVTZU - FP to fixed-point conversion */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int pass, size, fracbits;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    if (immh & 0x8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 0x4) {
        size = MO_32;
    } else if (immh & 0x2) {
        size = MO_16;
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* Should have split out AdvSIMD modified immediate earlier. */
        assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus);
    fracbits = (16 << size) - immhb;
    tcg_shift = tcg_constant_i32(fracbits);

    if (size == MO_64) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
        int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);

        switch (size) {
        case MO_16:
            if (is_u) {
                fn = gen_helper_vfp_touhh;
            } else {
                fn = gen_helper_vfp_toshh;
            }
            break;
        case MO_32:
            if (is_u) {
                fn = gen_helper_vfp_touls;
            } else {
                fn = gen_helper_vfp_tosls;
            }
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, size);
            fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, size);
            }
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
/* AdvSIMD scalar shift by immediate
 *  31 30  29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +-----+---+-------------+------+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-------------+------+------+--------+---+------+------+
 *
 * This is the scalar version so it works on a fixed size register
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF, UCVTF */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN, SQSHRUN2 */
    case 0x11: /* SQRSHRUN, SQRSHRUN2 */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
    case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS, FCVTZU */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* AdvSIMD scalar three different
 *  31 30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL, SQDMLAL2 */
    case 0xb: /* SQDMLSL, SQDMLSL2 */
    case 0xd: /* SQDMULL, SQDMULL2 */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s64(tcg_res, tcg_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, tcg_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);
    } else {
        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, tcg_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, tcg_env,
                                              tcg_res, tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);
    }
}
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Handle 64x64->64 opcodes which are shared between the scalar
     * and vector 3-same groups. We cover every opcode where size == 3
     * is valid in either the three-reg-same (integer, not pairwise)
     * or scalar-three-reg-same groups.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT, CMHI */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        /* 64 bit integer comparison, result = test ? -1 : 0. */
        tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x7: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST, CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* SSHL, USHL */
        if (u) {
            gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL, UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL, URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL, UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD, SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
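
/*
 * Illustrative note (not part of the original source): the comparison
 * results here are masks, not booleans, e.g. CMTST sets
 * rd = (rn & rm) != 0 ? -1 : 0 and CMEQ (u = 1, opcode 0x11) sets
 * rd = (rn == rm) ? -1 : 0, matching the all-ones/all-zeroes element
 * results the ARM ARM specifies.
 */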
/* Handle the 3-same-operands float operations; shared by the scalar
 * and vector encodings. The caller must filter out any encodings
 * not allocated for the encoding it is dealing with.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);
        } else {
            /* Single */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* scalar single so clear high part */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
    }

    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}
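
/* Note on the fpopcode convention used by the callers below: for the
 * scalar and vector three-same FP groups the operation is selected by
 * fpopcode = opcode | size[1] << 5 | U << 6; e.g. FABD is opcode 0x1a
 * with size[1] = 1 and U = 1, giving fpopcode 0x7a.
 */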
/* AdvSIMD scalar three same
 *  31 30  29 28       24 23  22  21 20  16 15    11  10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size[1] and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5d: /* FACGE */
        case 0x7d: /* FACGT */
        case 0x1c: /* FCMEQ */
        case 0x5c: /* FCMGE */
        case 0x7c: /* FCMGT */
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB (vector) */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
    } else {
        /* Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
         * no side effects for all these operations.
         * OPTME: special-purpose helpers would avoid doing some
         * unnecessary work in the helper for the 8 and 16 bit cases.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, tcg_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
    }

    write_fp_dreg(s, rd, tcg_rd);
}
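
/* In the FP16 variant below the operation is instead selected by
 * fpopcode = opcode | a << 3 | U << 4; e.g. FABD is opcode 2 with
 * a = 1 and U = 1, giving fpopcode 0x1a.
 */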
/* AdvSIMD scalar three same FP16
 *  31 30  29 28       24 23 22 21 20  16 15 14 13    11 10  9  5 4  0
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
 */
static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
                                                  uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 3);
    int rm = extract32(insn, 16, 5);
    bool u = extract32(insn, 29, 1);
    bool a = extract32(insn, 23, 1);
    int fpopcode = opcode | (a << 3) | (u << 4);
    TCGv_ptr fpst;
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;

    switch (fpopcode) {
    case 0x03: /* FMULX */
    case 0x04: /* FCMEQ (reg) */
    case 0x07: /* FRECPS */
    case 0x0f: /* FRSQRTS */
    case 0x14: /* FCMGE (reg) */
    case 0x15: /* FACGE */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT (reg) */
    case 0x1d: /* FACGT */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_res = tcg_temp_new_i32();

    switch (fpopcode) {
    case 0x03: /* FMULX */
        gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x04: /* FCMEQ (reg) */
        gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x07: /* FRECPS */
        gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x0f: /* FRSQRTS */
        gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x14: /* FCMGE (reg) */
        gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x15: /* FACGE */
        gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1a: /* FABD */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
        break;
    case 0x1c: /* FCMGT (reg) */
        gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1d: /* FACGT */
        gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);
}
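
/* Note: SQRDMLAH and SQRDMLSH below are the ARMv8.1 (FEAT_RDM) rounding
 * doubling multiply accumulate ops; per element this is roughly
 * sat(((acc << esize) +/- 2 * rn * rm + (1 << (esize - 1))) >> esize).
 * Only 16-bit and 32-bit element sizes are allocated.
 */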
/* AdvSIMD scalar three same extra
 *  31 30  29 28       24 23  22  21 20  16 15 14    11  10 9  5 4  0
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
                                                   uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i32 ele1, ele2, ele3;
    TCGv_i64 res;
    bool feature;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /* Do a single operation on the lowest element in the vector.
     * We use the standard Neon helpers and rely on 0 OP 0 == 0
     * with no side effects for all these operations.
     * OPTME: special-purpose helpers would avoid doing some
     * unnecessary work in the helper for the 16 bit cases.
     */
    ele1 = tcg_temp_new_i32();
    ele2 = tcg_temp_new_i32();
    ele3 = tcg_temp_new_i32();

    read_vec_element_i32(s, ele1, rn, 0, size);
    read_vec_element_i32(s, ele2, rm, 0, size);
    read_vec_element_i32(s, ele3, rd, 0, size);

    switch (opcode) {
    case 0x0: /* SQRDMLAH */
        if (size == 1) {
            gen_helper_neon_qrdmlah_s16(ele3, tcg_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlah_s32(ele3, tcg_env, ele1, ele2, ele3);
        }
        break;
    case 0x1: /* SQRDMLSH */
        if (size == 1) {
            gen_helper_neon_qrdmlsh_s16(ele3, tcg_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlsh_s32(ele3, tcg_env, ele1, ele2, ele3);
        }
        break;
    default:
        g_assert_not_reached();
    }

    res = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(res, ele3);
    write_fp_dreg(s, rd, res);
}
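
/* Note: the integer compares against zero in handle_2misc_64 below
 * (CMGT/CMGE/CMEQ/CMLE/CMLT) use tcg_gen_negsetcond_i64 so that a true
 * comparison yields -1 (all bits set) and false yields 0, the usual
 * AdvSIMD comparison-result convention.
 */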
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Handle 64->64 opcodes which are shared between the scalar and
     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
     * is valid in either group and also the double-precision fp ops.
     * The caller need only provide tcg_rmode and tcg_fpstatus if the op
     * requires them.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode is shared with CNT and RBIT but we have earlier
         * enforced that size == 3 if and only if this is the NOT insn.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, tcg_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, tcg_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT */
        cond = TCG_COND_LT;
    do_cmop:
        /* 64 bit integer comparison against zero, result is test ? -1 : 0. */
        tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_constant_i64(0));
        break;
    case 0x8: /* CMGT, CMGE */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            tcg_gen_abs_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, tcg_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1e: /* FRINT32Z */
    case 0x5e: /* FRINT32X */
        gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1f: /* FRINT64Z */
    case 0x5f: /* FRINT64X */
        gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
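
/* Note: there are no dedicated less-than/less-equal helpers below;
 * FCMLT (zero) and FCMLE (zero) are implemented by swapping the operands
 * of the FCMGT/FCMGE helpers, since x < 0 iff 0 > x.
 */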
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_constant_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOpFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_constant_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOpFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }

        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
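
/* Note: FRECPE/FRSQRTE below produce the Arm reciprocal and reciprocal
 * square root estimates and FRECPX the reciprocal exponent; URECPE is
 * the unsigned integer estimate variant and exists only for 32-bit
 * elements.
 */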
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                gen_helper_recpe_u32(tcg_res, tcg_op);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
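
/* Note: for the narrowing ops below each 2*size source element becomes
 * a size destination element, e.g. XTN with size = 1 truncates each
 * 32-bit element to 16 bits; the "2" (Q = 1) variants write the upper
 * half of the destination (destelt = 2) and leave the lower half alone.
 */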
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
     * in the source becomes a size element in the destination).
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;
    int passes = scalar ? 1 : 2;

    if (scalar) {
        tcg_res[1] = tcg_constant_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, SQXTUN */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, UQXTN */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, tcg_env);
            } else {
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
                TCGv_i32 ahp = get_ahp_flag();

                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
            }
            break;
        case 0x36: /* BFCVTN, BFCVTN2 */
        {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
        }
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            /* 64 bit to 32 bit float conversion
             * with von Neumann rounding (round to odd)
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, tcg_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], tcg_env, tcg_op);
        }
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
    }
    clear_vec_high(s, is_q, rd);
}
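
/* Note: USQADD below accumulates a signed value into an unsigned
 * destination with unsigned saturation, and SUQADD accumulates an
 * unsigned value with signed saturation, hence the mixed-signedness
 * helper names (uqadd_s*, sqadd_u*).
 */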
/* Remaining saturating accumulating ops */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, tcg_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, tcg_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, tcg_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, tcg_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, tcg_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, tcg_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, tcg_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, tcg_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        clear_vec_high(s, is_q, rd);
    }
}
/* AdvSIMD scalar two reg misc
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD */
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
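            /* rmode here follows the Arm FPRounding encoding:
             * FCVTN* -> 0 (TIEEVEN), FCVTP* -> 1 (POSINF),
             * FCVTM* -> 2 (NEGINF), FCVTZ* -> 3 (ZERO).
             */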
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
        tcg_fpstatus = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, tcg_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0),
                                 tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0),
                                 tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
    }

    if (is_fcvt) {
        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
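
/* Note: for the shift-right-by-immediate forms below the shift count is
 * encoded as 2 * esize - (immh:immb); e.g. immh = 0b0001, immb = 0b101
 * gives size 0 (esize 8), immhb 13 and shift 16 - 13 = 3, i.e. a shift
 * right by 3 on byte elements.
 */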
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    GVecGen2iFn *gvec_fn;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }
    tcg_debug_assert(size <= 3);

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
        break;

    case 0x08: /* SRI */
        gvec_fn = gen_gvec_sri;
        break;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same size as element size produces zero. */
                tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
                                     is_q ? 16 : 8, vec_full_reg_size(s), 0);
                return;
            }
            gvec_fn = tcg_gen_gvec_shri;
        } else {
            /* Shift count the same size as element size produces all sign. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gvec_fn = tcg_gen_gvec_sari;
        }
        break;

    case 0x04: /* SRSHR / URSHR (rounding) */
        gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
        break;

    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
        break;

    default:
        g_assert_not_reached();
    }

    gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
}
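
/* Note: for the shift-left-by-immediate forms below the count is encoded
 * as (immh:immb) - esize; e.g. immh = 0b0010, immb = 0b011 gives size 1
 * (esize 16), immhb 19 and shift 19 - 16 = 3.
 */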
/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);

    /* Range of size is limited by decode: immh is a non-zero 4 bit field */
    assert(size >= 0 && size <= 3);

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (insert) {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
    } else {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
    }
}
/* USHLL/SHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = tcg_temp_new_i64();
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right now and use shifts in the main loop.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}
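
/* Note: in the RSHRN case below, rounding is done by adding the constant
 * 1 << (shift - 1) before the truncating right shift, i.e. rounding
 * half-up at the dropped bit position.
 */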
/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size+1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size+1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    clear_vec_high(s, is_q, rd);
}
/* AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
    assert(immh != 0);

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA (accumulate) */
    case 0x04: /* SRSHR / URSHR (rounding) */
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS/ FCVTZU */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
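
/* Note: gen_neon_addl below operates on an i64 holding packed lanes of
 * twice the width named by size; e.g. size = 0 adds four 16-bit lanes
 * (each holding a widened byte element) within the 64-bit value.
 */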
/* Generate code to do a "long" addition or subtraction, ie one done in
 * TCGv_i64 on vector lanes twice the width specified by size.
 */
static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
                          TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
{
    static NeonGenTwo64OpFn * const fns[3][2] = {
        { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
        { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
        { tcg_gen_add_i64, tcg_gen_sub_i64 },
    };
    NeonGenTwo64OpFn *genfn;
    assert(size < 3);

    genfn = fns[size][is_sub];
    genfn(tcg_res, tcg_op1, tcg_op2);
}
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate, a subtracting accumulate,
     * or no accumulate at all?
     */
    switch (opcode) {
    case 5:
    case 8:
    case 9:
        accop = 1;
        break;
    case 10:
    case 11:
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally handle it inline.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                break;
            }
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s64(tcg_passres, tcg_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], tcg_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }
        }
    } else {
        /* size 0 or 1, generally helper functions */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                break;
            }
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s32(tcg_passres, tcg_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], tcg_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
}
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
    }
}
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
    tcg_gen_extrh_i64_i32(res, in);
}
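
/* Note: the xxxHN ops below keep only the high half of each wide result;
 * the rounding RxxxHN variants first add 1 << (esize - 1), as
 * do_narrow_round_high_u32 above does for the 32-bit element case.
 */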
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
    }
    clear_vec_high(s, is_q, rd);
}
/* AdvSIMD three different
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    /* Instructions in this group fall into three basic classes
     * (in each case with the operation working on each element in
     * the input vectors):
     * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
     *     128 bit input)
     * (2) wide 64 x 128 -> 128
     * (3) narrowing 128 x 128 -> 64
     * Here we do initial decode, catch unallocated cases and
     * dispatch to separate functions for each class.
     */
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
        /* 64 x 128 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
        /* 128 x 128 -> 64 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* PMULL, PMULL2 */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        switch (size) {
        case 0: /* PMULL.P8 */
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn. */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_neon_pmull_h);
            break;

        case 3: /* PMULL.P64 */
            if (!dc_isar_feature(aa64_pmull, s)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn. */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_gvec_pmull_q);
            break;

        default:
            unallocated_encoding(s);
            break;
        }
        return;
    case 9: /* SQDMLAL, SQDMLAL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
    case 13: /* SQDMULL, SQDMULL2 */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
    case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
    case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
    case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
    case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}
/* Logic op (opcode == 3) subgroup of C3.6.16. */
static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    if (!fp_access_check(s)) {
        return;
    }

    switch (size + 4 * is_u) {
    case 0: /* AND */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
        return;
    case 1: /* BIC */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
        return;
    case 2: /* ORR */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
        return;
    case 3: /* ORN */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
        return;
    case 4: /* EOR */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
        return;
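    /* The three bitwise-select forms all map onto tcg_gen_gvec_bitsel,
     * which computes d = (b & a) | (c & ~a) with operand a acting as the
     * selector: BSL selects on the destination, BIT inserts rn bits
     * where rm is set, and BIF inserts rn bits where rm is clear.
     */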
    case 5: /* BSL bitwise select */
        gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
        return;
    case 6: /* BIT, bitwise insert if true */
        gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
        return;
    case 7: /* BIF, bitwise insert if false */
        gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
        return;

    default:
        g_assert_not_reached();
    }
}
/* Pairwise op subgroup of C3.6.16.
 *
 * This is called directly or via handle_3same_float for float pairwise
 * operations where the opcode and size are calculated differently.
 */
static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
                                   int size, int rn, int rm, int rd)
{
    TCGv_ptr fpst;
    int pass;

    /* Floating point operations need fpst */
    if (opcode >= 0x58) {
        fpst = fpstatus_ptr(FPST_FPCR);
    } else {
        fpst = NULL;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* These operations work on the concatenated rm:rn, with each pair of
     * adjacent elements being operated on to produce an element in the result.
     */
    if (size == 3) {
        TCGv_i64 tcg_res[2];

        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            int passreg = (pass == 0) ? rn : rm;

            read_vec_element(s, tcg_op1, passreg, 0, MO_64);
            read_vec_element(s, tcg_op2, passreg, 1, MO_64);
            tcg_res[pass] = tcg_temp_new_i64();

            switch (opcode) {
            case 0x17: /* ADDP */
                tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 0x58: /* FMAXNMP */
                gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5a: /* FADDP */
                gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5e: /* FMAXP */
                gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x78: /* FMINNMP */
                gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x7e: /* FMINP */
                gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        }
    } else {
        int maxpass = is_q ? 4 : 2;
        TCGv_i32 tcg_res[4];

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (is_q && (pass & 1)) ? 2 : 0;

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (opcode) {
            case 0x17: /* ADDP */
            {
                static NeonGenTwoOpFn * const fns[3] = {
                    gen_helper_neon_padd_u8,
                    gen_helper_neon_padd_u16,
                    tcg_gen_add_i32,
                };
                genfn = fns[size];
                break;
            }
            case 0x14: /* SMAXP, UMAXP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
                    { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
                    { tcg_gen_smax_i32, tcg_gen_umax_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x15: /* SMINP, UMINP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
                    { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
                    { tcg_gen_smin_i32, tcg_gen_umin_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            /* The FP operations are all on single floats (32 bit) */
            case 0x58: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5a: /* FADDP */
                gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5e: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x78: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x7e: /* FMINP */
                gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            /* FP ops called directly, otherwise call now */
            if (genfn) {
                genfn(tcg_res[pass], tcg_op1, tcg_op2);
            }
        }

        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
        }
        clear_vec_high(s, is_q, rd);
    }
}
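
/* Note: in the vector FP group below, size is a single bit (insn bit 22):
 * esize = 32 << size, so e.g. Q = 1 with size = 0 operates on four
 * single-precision lanes and size = 1 on two double-precision lanes.
 */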
/* Floating point op subgroup of C3.6.16. */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    /* For floating point ops, the U, size[1] and opcode bits
     * together indicate the operation. size[0] indicates single
     * or double.
     */
    int fpopcode = extract32(insn, 11, 5)
        | (extract32(insn, 23, 1) << 5)
        | (extract32(insn, 29, 1) << 6);
    int is_q = extract32(insn, 30, 1);
    int size = extract32(insn, 22, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    if (size == 1 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    case 0x58: /* FMAXNMP */
    case 0x5a: /* FADDP */
    case 0x5e: /* FMAXP */
    case 0x78: /* FMINNMP */
    case 0x7e: /* FMINP */
        if (size && !is_q) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                               rn, rm, rd);
        return;
    case 0x1b: /* FMULX */
    case 0x1f: /* FRECPS */
    case 0x3f: /* FRSQRTS */
    case 0x5d: /* FACGE */
    case 0x7d: /* FACGT */
    case 0x19: /* FMLA */
    case 0x39: /* FMLS */
    case 0x18: /* FMAXNM */
    case 0x1a: /* FADD */
    case 0x1c: /* FCMEQ */
    case 0x1e: /* FMAX */
    case 0x38: /* FMINNM */
    case 0x3a: /* FSUB */
    case 0x3e: /* FMIN */
    case 0x5b: /* FMUL */
    case 0x5c: /* FCMGE */
    case 0x5f: /* FDIV */
    case 0x7a: /* FABD */
    case 0x7c: /* FCMGT */
        if (!fp_access_check(s)) {
            return;
        }
        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;

    case 0x1d: /* FMLAL */
    case 0x3d: /* FMLSL */
    case 0x59: /* FMLAL2 */
    case 0x79: /* FMLSL2 */
        if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
            unallocated_encoding(s);
            return;
        }
        if (fp_access_check(s)) {
            int is_s = extract32(insn, 23, 1);
            int is_2 = extract32(insn, 29, 1);
            int data = (is_2 << 1) | is_s;
            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm), tcg_env,
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               data, gen_helper_gvec_fmlal_a64);
        }
        return;

    default:
        unallocated_encoding(s);
        return;
    }
}
11084 /* Integer op subgroup of C3.6.16. */
11085 static void disas_simd_3same_int(DisasContext
*s
, uint32_t insn
)
11087 int is_q
= extract32(insn
, 30, 1);
11088 int u
= extract32(insn
, 29, 1);
11089 int size
= extract32(insn
, 22, 2);
11090 int opcode
= extract32(insn
, 11, 5);
11091 int rm
= extract32(insn
, 16, 5);
11092 int rn
= extract32(insn
, 5, 5);
11093 int rd
= extract32(insn
, 0, 5);
11098 case 0x13: /* MUL, PMUL */
11099 if (u
&& size
!= 0) {
11100 unallocated_encoding(s
);
11104 case 0x0: /* SHADD, UHADD */
11105 case 0x2: /* SRHADD, URHADD */
11106 case 0x4: /* SHSUB, UHSUB */
11107 case 0xc: /* SMAX, UMAX */
11108 case 0xd: /* SMIN, UMIN */
11109 case 0xe: /* SABD, UABD */
11110 case 0xf: /* SABA, UABA */
11111 case 0x12: /* MLA, MLS */
11113 unallocated_encoding(s
);
11117 case 0x16: /* SQDMULH, SQRDMULH */
11118 if (size
== 0 || size
== 3) {
11119 unallocated_encoding(s
);
11124 if (size
== 3 && !is_q
) {
11125 unallocated_encoding(s
);
11131 if (!fp_access_check(s
)) {
11136 case 0x01: /* SQADD, UQADD */
11138 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_uqadd_qc
, size
);
11140 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sqadd_qc
, size
);
11143 case 0x05: /* SQSUB, UQSUB */
11145 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_uqsub_qc
, size
);
11147 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sqsub_qc
, size
);
11150 case 0x08: /* SSHL, USHL */
11152 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_ushl
, size
);
11154 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sshl
, size
);
11157 case 0x0c: /* SMAX, UMAX */
11159 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_umax
, size
);
11161 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_smax
, size
);
11164 case 0x0d: /* SMIN, UMIN */
11166 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_umin
, size
);
11168 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_smin
, size
);
11171 case 0xe: /* SABD, UABD */
11173 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_uabd
, size
);
11175 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sabd
, size
);
11178 case 0xf: /* SABA, UABA */
11180 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_uaba
, size
);
11182 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_saba
, size
);
11185 case 0x10: /* ADD, SUB */
11187 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_sub
, size
);
11189 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_add
, size
);
11192 case 0x13: /* MUL, PMUL */
11193 if (!u
) { /* MUL */
11194 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_mul
, size
);
11195 } else { /* PMUL */
11196 gen_gvec_op3_ool(s
, is_q
, rd
, rn
, rm
, 0, gen_helper_gvec_pmul_b
);
11199 case 0x12: /* MLA, MLS */
11201 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_mls
, size
);
11203 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_mla
, size
);
11206 case 0x16: /* SQDMULH, SQRDMULH */
11208 static gen_helper_gvec_3_ptr
* const fns
[2][2] = {
11209 { gen_helper_neon_sqdmulh_h
, gen_helper_neon_sqrdmulh_h
},
11210 { gen_helper_neon_sqdmulh_s
, gen_helper_neon_sqrdmulh_s
},
11212 gen_gvec_op3_qc(s
, is_q
, rd
, rn
, rm
, fns
[size
- 1][u
]);
11216 if (!u
) { /* CMTST */
11217 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_cmtst
, size
);
11221 cond
= TCG_COND_EQ
;
11223 case 0x06: /* CMGT, CMHI */
11224 cond
= u
? TCG_COND_GTU
: TCG_COND_GT
;
11226 case 0x07: /* CMGE, CMHS */
11227 cond
= u
? TCG_COND_GEU
: TCG_COND_GE
;
11229 tcg_gen_gvec_cmp(cond
, size
, vec_full_reg_offset(s
, rd
),
11230 vec_full_reg_offset(s
, rn
),
11231 vec_full_reg_offset(s
, rm
),
11232 is_q
? 16 : 8, vec_full_reg_size(s
));
    if (size == 3) {
        assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);

            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
    } else {
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            NeonGenTwoOpEnvFn *genenvfn = NULL;

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (opcode) {
            case 0x0: /* SHADD, UHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x2: /* SRHADD, URHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x4: /* SHSUB, UHSUB */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x9: /* SQSHL, UQSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xa: /* SRSHL, URSHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xb: /* SQRSHL, UQRSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            default:
                g_assert_not_reached();
            }

            if (genenvfn) {
                genenvfn(tcg_res, tcg_env, tcg_op1, tcg_op2);
            } else {
                genfn(tcg_res, tcg_op1, tcg_op2);
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
        }
    }
    clear_vec_high(s, is_q, rd);
}
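/*
 * Reader's note: in the element loop above, the plain NeonGenTwoOpFn
 * helpers need only their operands, while the saturating variants
 * (NeonGenTwoOpEnvFn) also take tcg_env so they can set the cumulative
 * QC saturation bit in FPSR; that is the reason for the genfn /
 * genenvfn split.
 */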
/* AdvSIMD three same
 *  31  30  29  28       24 23  22  21 20  16 15    11  10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 5);

    switch (opcode) {
    case 0x3: /* logic ops */
        disas_simd_3same_logic(s, insn);
        break;
    case 0x17: /* ADDP */
    case 0x14: /* SMAXP, UMAXP */
    case 0x15: /* SMINP, UMINP */
    {
        /* Pairwise operations */
        int is_q = extract32(insn, 30, 1);
        int u = extract32(insn, 29, 1);
        int size = extract32(insn, 22, 2);
        int rm = extract32(insn, 16, 5);
        int rn = extract32(insn, 5, 5);
        int rd = extract32(insn, 0, 5);
        if (opcode == 0x17) {
            if (u || (size == 3 && !is_q)) {
                unallocated_encoding(s);
                return;
            }
        } else {
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
        }
        handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
        break;
    }
    case 0x18 ... 0x31:
        /* floating point ops, sz[1] and U are part of opcode */
        disas_simd_3same_float(s, insn);
        break;
    default:
        disas_simd_3same_int(s, insn);
        break;
    }
}
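/*
 * Worked decode example (informal): ADDP V0.4S, V1.4S, V2.4S has
 * Q = 1, U = 0, size = 0b10, Rm = 2, opcode = 0b10111 (0x17), Rn = 1,
 * Rd = 0, so it is dispatched through the pairwise arm above.
 */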
/*
 * Advanced SIMD three same (ARMv8.2 FP16 variants)
 *
 *  31  30  29  28       24 23 22 21 20  16 15 14 13    11 10 9    5 4    0
 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
 *
 * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
 * (register), FACGE, FABD, FCMGT (register) and FACGT.
 */
static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 3);
    int u = extract32(insn, 29, 1);
    int a = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    /*
     * For these floating point ops, the U, a and opcode bits
     * together indicate the operation.
     */
    int fpopcode = opcode | (a << 3) | (u << 4);
    int datasize = is_q ? 128 : 64;
    int elements = datasize / 16;
    bool pairwise;
    TCGv_ptr fpst;
    int pass;

    switch (fpopcode) {
    case 0x0: /* FMAXNM */
    case 0x1: /* FMLA */
    case 0x2: /* FADD */
    case 0x3: /* FMULX */
    case 0x4: /* FCMEQ */
    case 0x6: /* FMAX */
    case 0x7: /* FRECPS */
    case 0x8: /* FMINNM */
    case 0x9: /* FMLS */
    case 0xa: /* FSUB */
    case 0xe: /* FMIN */
    case 0xf: /* FRSQRTS */
    case 0x13: /* FMUL */
    case 0x14: /* FCMGE */
    case 0x15: /* FACGE */
    case 0x17: /* FDIV */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT */
    case 0x1d: /* FACGT */
        pairwise = false;
        break;
    case 0x10: /* FMAXNMP */
    case 0x12: /* FADDP */
    case 0x16: /* FMAXP */
    case 0x18: /* FMINNMP */
    case 0x1e: /* FMINP */
        pairwise = true;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);

    if (pairwise) {
        int maxpass = is_q ? 8 : 4;
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res[8];

        for (pass = 0; pass < maxpass; pass++) {
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (pass << 1) & (maxpass - 1);

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (fpopcode) {
            case 0x10: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x12: /* FADDP */
                gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x16: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x1e: /* FMINP */
                gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
        }
    } else {
        for (pass = 0; pass < elements; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);

            switch (fpopcode) {
            case 0x0: /* FMAXNM */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0x2: /* FADD */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3: /* FMULX */
                gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x4: /* FCMEQ */
                gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x6: /* FMAX */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7: /* FRECPS */
                gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x8: /* FMINNM */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x9: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0xa: /* FSUB */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xe: /* FMIN */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FRSQRTS */
                gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x13: /* FMUL */
                gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x14: /* FCMGE */
                gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x15: /* FACGE */
                gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x17: /* FDIV */
                gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FABD */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
                break;
            case 0x1c: /* FCMGT */
                gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1d: /* FACGT */
                gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
        }
    }

    clear_vec_high(s, is_q, rd);
}
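/*
 * The pairwise pass arithmetic above is easiest to see by example: for
 * a 128-bit FADDP (maxpass = 8), passes 0-3 read element pairs
 * (0,1),(2,3),(4,5),(6,7) of Rn and passes 4-7 read the same pairs of
 * Rm; passreg selects the source register and
 * passelt = (pass << 1) & (maxpass - 1) rescans the pair offsets.
 */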
/* AdvSIMD three same extra
 *  31   30  29 28       24 23  22  21 20  16 15 14    11  10 9  5 4  0
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 | Q | U | 0 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    bool feature;
    int rot;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    case 0x02: /* SDOT (vector) */
    case 0x12: /* UDOT (vector) */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_dp, s);
        break;
    case 0x03: /* USDOT */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_i8mm, s);
        break;
    case 0x04: /* SMMLA */
    case 0x14: /* UMMLA */
    case 0x05: /* USMMLA */
        if (!is_q || size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_i8mm, s);
        break;
    case 0x18: /* FCMLA, #0 */
    case 0x19: /* FCMLA, #90 */
    case 0x1a: /* FCMLA, #180 */
    case 0x1b: /* FCMLA, #270 */
    case 0x1c: /* FCADD, #90 */
    case 0x1e: /* FCADD, #270 */
        if (size == 0
            || (size == 1 && !dc_isar_feature(aa64_fp16, s))
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_fcma, s);
        break;
    case 0x1d: /* BFMMLA */
        if (size != MO_16 || !is_q) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_bf16, s);
        break;
    case 0x1f:
        switch (size) {
        case 1: /* BFDOT */
        case 3: /* BFMLAL{B,T} */
            feature = dc_isar_feature(aa64_bf16, s);
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x0: /* SQRDMLAH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
        return;

    case 0x1: /* SQRDMLSH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
        return;

    case 0x2: /* SDOT / UDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
                         u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
        return;

    case 0x3: /* USDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
        return;

    case 0x04: /* SMMLA, UMMLA */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
                         u ? gen_helper_gvec_ummla_b
                         : gen_helper_gvec_smmla_b);
        return;
    case 0x05: /* USMMLA */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
        return;

    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
        rot = extract32(opcode, 0, 2);
        switch (size) {
        case 1:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
                              gen_helper_gvec_fcmlah);
            break;
        case 2:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlas);
            break;
        case 3:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlad);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        rot = extract32(opcode, 1, 1);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddh);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcadds);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddd);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xd: /* BFMMLA */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
        return;
    case 0xf:
        switch (size) {
        case 1: /* BFDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
                             gen_helper_gvec_bfdot);
            break;
        case 3: /* BFMLAL{B,T} */
            gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
                              gen_helper_gvec_bfmlal);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    default:
        g_assert_not_reached();
    }
}
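/*
 * Note: for FCMLA the low two opcode bits select the rotation, so
 * rot = 0, 1, 2, 3 corresponds to #0, #90, #180, #270 degrees and is
 * handed to the fcmla helpers unchanged; FCADD only distinguishes #90
 * from #270, hence the single-bit extract.
 */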
static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
                                  int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are widening (so each size element
     * in the source becomes a 2*size element in the destination.
     * The only instruction like this is FCVTL.
     */
    int pass;

    if (size == 3) {
        /* 32 -> 64 bit fp conversion */
        TCGv_i64 tcg_res[2];
        int srcelt = is_q ? 2 : 0;

        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
            gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, tcg_env);
        }
        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        }
    } else {
        /* 16 -> 32 bit fp conversion */
        int srcelt = is_q ? 4 : 0;
        TCGv_i32 tcg_res[4];
        TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 ahp = get_ahp_flag();

        for (pass = 0; pass < 4; pass++) {
            tcg_res[pass] = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
            gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
                                           fpst, ahp);
        }
        for (pass = 0; pass < 4; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
        }
    }
}
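/*
 * E.g. FCVTL2 Vd.2D, Vn.4S arrives here with is_q = 1 and size = 3,
 * so srcelt = 2 and source elements 2 and 3 are converted to double
 * precision; the plain FCVTL form starts at element 0 instead.
 */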
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz;
    int dsize = is_q ? 128 : 64;
    int i;

    if (opsz >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Special case bytes, use bswap op on each group of elements */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        int revmask = (1 << grp_size) - 1;
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd[2];

        for (i = 0; i < 2; i++) {
            tcg_rd[i] = tcg_temp_new_i64();
            tcg_gen_movi_i64(tcg_rd[i], 0);
        }

        for (i = 0; i < elements; i++) {
            int e_rev = (i & 0xf) ^ revmask;
            int w = (e_rev * esize) / 64;
            int o = (e_rev * esize) % 64;

            read_vec_element(s, tcg_rn, rn, i, size);
            tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize);
        }

        for (i = 0; i < 2; i++) {
            write_vec_element(s, tcg_rd[i], rd, i, MO_64);
        }
        clear_vec_high(s, true, rd);
    }
}
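/*
 * Worked example (informal): REV32 Vd.8H, Vn.8H gives op = 1,
 * opsz = 2, grp_size = 1, so revmask = 1 and each element index is
 * XORed with 1; 16-bit elements swap in pairs, i.e. reversal within
 * each 32-bit word, which is exactly what REV32 requires.
 */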
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /* Implement the pairwise operations from 2-misc:
     * SADDLP, UADDLP, SADALP, UADALP.
     * These all add pairs of elements in the input to produce a
     * double-width result element in the output (possibly accumulating).
     */
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 op */
        MemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }
        }
    } else {
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOne64OpFn *genfn;
            static NeonGenOne64OpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
        }
    }
    if (!is_q) {
        tcg_res[1] = tcg_constant_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
    }
}
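/*
 * E.g. SADDLP Vd.4S, Vn.8H sign-extends each pair of 16-bit elements
 * and adds them into one 32-bit result element; SADALP is the same
 * operation but additionally accumulates into the existing Vd element
 * (the accum case above).
 */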
static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
{
    /* Implement SHLL and SHLL2 */
    int pass;
    int part = is_q ? 2 : 0;
    TCGv_i64 tcg_res[2];

    for (pass = 0; pass < 2; pass++) {
        static NeonGenWidenFn * const widenfns[3] = {
            gen_helper_neon_widen_u8,
            gen_helper_neon_widen_u16,
            tcg_gen_extu_i32_i64,
        };
        NeonGenWidenFn *widenfn = widenfns[size];
        TCGv_i32 tcg_op = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
        tcg_res[pass] = tcg_temp_new_i64();
        widenfn(tcg_res[pass], tcg_op);
        tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
    }
}
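/*
 * E.g. SHLL Vd.8H, Vn.8B, #8 widens each byte and shifts it left by
 * the element size (8 << size, the only shift amount this insn
 * allows); SHLL2 (is_q set) does the same starting from the high half
 * of the source via 'part'.
 */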
/* AdvSIMD two reg misc
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool need_fpstatus = false;
    int rmode = -1;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT */
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
        return;
    case 0x4: /* CLS, CLZ */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SADDLP, UADDLP */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_shll(s, is_q, size, rn, rd);
        return;
    case 0xa: /* CMLT */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x3: /* SUQADD, USQADD */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
        return;
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc ... 0xf:
    case 0x16 ... 0x1f:
    {
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        int is_double = extract32(size, 0, 1);
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = is_double ? 3 : 2;
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d) ? true : false;
            int elements = is_double ? 2 : is_q ? 4 : 2;
            if (is_double && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
            return;
        }
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x7f: /* FSQRT */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            need_fpstatus = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x5c: /* FCVTAU */
        case 0x1c: /* FCVTAS */
            need_fpstatus = true;
            rmode = FPROUNDING_TIEAWAY;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x3c: /* URECPE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x3d: /* FRECPE */
        case 0x7d: /* FRSQRTE */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x16: /* FCVTN, FCVTN2 */
            /* handle_2misc_narrow does a 2*size -> size operation, but these
             * instructions encode the source size rather than dest size.
             */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x36: /* BFCVTN, BFCVTN2 */
            if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
            return;
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            /* fall through */
        case 0x59: /* FRINTX */
        case 0x79: /* FRINTI */
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x58: /* FRINTA */
            rmode = FPROUNDING_TIEAWAY;
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x7c: /* URSQRTE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1e: /* FRINT32Z */
        case 0x1f: /* FRINT64Z */
            rmode = FPROUNDING_ZERO;
            /* fall through */
        case 0x5e: /* FRINT32X */
        case 0x5f: /* FRINT64X */
            need_fpstatus = true;
            if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
                unallocated_encoding(s);
                return;
            }
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_fpstatus || rmode >= 0) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
    } else {
        tcg_fpstatus = NULL;
    }
    if (rmode >= 0) {
        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
    }

    switch (opcode) {
    case 0x5:
        if (u && size == 0) { /* NOT */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
            return;
        }
        break;
    case 0x8: /* CMGT, CMGE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
        }
        return;
    case 0x9: /* CMEQ, CMLE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
        }
        return;
    case 0xa: /* CMLT */
        gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
        return;
    case 0xb:
        if (u) { /* ABS, NEG */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
        }
        return;
    }

    if (size == 3) {
        /* All 64-bit element operations can be shared with scalar 2misc */
        int pass;

        /* Coverity claims (size == 3 && !is_q) has been eliminated
         * from all paths leading to here.
         */
        tcg_debug_assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
                            tcg_rmode, tcg_fpstatus);

            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
    } else {
        int pass;

        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0x4: /* CLS */
                    if (u) {
                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
                    } else {
                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                    if (u) {
                        gen_helper_neon_qneg_s32(tcg_res, tcg_env, tcg_op);
                    } else {
                        gen_helper_neon_qabs_s32(tcg_res, tcg_env, tcg_op);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_helper_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_helper_vfp_negs(tcg_res, tcg_op);
                    break;
                case 0x7f: /* FSQRT */
                    gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env);
                    break;
                case 0x1a: /* FCVTNS */
                case 0x1b: /* FCVTMS */
                case 0x1c: /* FCVTAS */
                case 0x3a: /* FCVTPS */
                case 0x3b: /* FCVTZS */
                    gen_helper_vfp_tosls(tcg_res, tcg_op,
                                         tcg_constant_i32(0), tcg_fpstatus);
                    break;
                case 0x5a: /* FCVTNU */
                case 0x5b: /* FCVTMU */
                case 0x5c: /* FCVTAU */
                case 0x7a: /* FCVTPU */
                case 0x7b: /* FCVTZU */
                    gen_helper_vfp_touls(tcg_res, tcg_op,
                                         tcg_constant_i32(0), tcg_fpstatus);
                    break;
                case 0x18: /* FRINTN */
                case 0x19: /* FRINTM */
                case 0x38: /* FRINTP */
                case 0x39: /* FRINTZ */
                case 0x58: /* FRINTA */
                case 0x79: /* FRINTI */
                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x59: /* FRINTX */
                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x7c: /* URSQRTE */
                    gen_helper_rsqrte_u32(tcg_res, tcg_op);
                    break;
                case 0x1e: /* FRINT32Z */
                case 0x5e: /* FRINT32X */
                    gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x1f: /* FRINT64Z */
                case 0x5f: /* FRINT64X */
                    gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* For these two insns size is part of the opcode specifier
                     * (handled earlier); they always operate on byte elements.
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                {
                    NeonGenOneOpEnvFn *genfn;
                    static NeonGenOneOpEnvFn * const fns[2][2] = {
                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                    };
                    genfn = fns[size][u];
                    genfn(tcg_res, tcg_env, tcg_op);
                    break;
                }
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
        }
    }
    clear_vec_high(s, is_q, rd);

    if (tcg_rmode) {
        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
/* AdvSIMD [scalar] two register miscellaneous (FP16)
 *
 *   31  30  29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 *   mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
 *   val:  0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
 *
 * This actually covers two groups where scalar access is governed by
 * bit 28. A bunch of the instructions (float to integral) only exist
 * in the vector form and are un-allocated for the scalar decode. Also
 * in the scalar decode Q is always 1.
 */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
{
    int fpop, opcode, a, u;
    int rn, rd;
    bool is_q;
    bool is_scalar;
    bool only_in_vector = false;

    int pass;
    TCGv_i32 tcg_rmode = NULL;
    TCGv_ptr tcg_fpstatus = NULL;
    bool need_fpst = true;
    int rmode = -1;

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    a = extract32(insn, 23, 1);
    u = extract32(insn, 29, 1);
    is_scalar = extract32(insn, 28, 1);
    is_q = extract32(insn, 30, 1);

    opcode = extract32(insn, 12, 5);
    fpop = deposit32(opcode, 5, 1, a);
    fpop = deposit32(fpop, 6, 1, u);

    switch (fpop) {
    case 0x1d: /* SCVTF */
    case 0x5d: /* UCVTF */
    {
        int elements;

        if (is_scalar) {
            elements = 1;
        } else {
            elements = (is_q ? 8 : 4);
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
        return;
    }
    case 0x2c: /* FCMGT (zero) */
    case 0x2d: /* FCMEQ (zero) */
    case 0x2e: /* FCMLT (zero) */
    case 0x6c: /* FCMGE (zero) */
    case 0x6d: /* FCMLE (zero) */
        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
        return;
    case 0x3d: /* FRECPE */
    case 0x3f: /* FRECPX */
        break;
    case 0x18: /* FRINTN */
        only_in_vector = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x19: /* FRINTM */
        only_in_vector = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x38: /* FRINTP */
        only_in_vector = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x39: /* FRINTZ */
        only_in_vector = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x58: /* FRINTA */
        only_in_vector = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x59: /* FRINTX */
    case 0x79: /* FRINTI */
        only_in_vector = true;
        /* current rounding mode */
        break;
    case 0x1a: /* FCVTNS */
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x1b: /* FCVTMS */
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x1c: /* FCVTAS */
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x3a: /* FCVTPS */
        rmode = FPROUNDING_POSINF;
        break;
    case 0x3b: /* FCVTZS */
        rmode = FPROUNDING_ZERO;
        break;
    case 0x5a: /* FCVTNU */
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x5b: /* FCVTMU */
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x5c: /* FCVTAU */
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x7a: /* FCVTPU */
        rmode = FPROUNDING_POSINF;
        break;
    case 0x7b: /* FCVTZU */
        rmode = FPROUNDING_ZERO;
        break;
    case 0x2f: /* FABS */
    case 0x6f: /* FNEG */
        need_fpst = false;
        break;
    case 0x7d: /* FRSQRTE */
    case 0x7f: /* FSQRT (vector) */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    /* Check additional constraints for the scalar encoding */
    if (is_scalar) {
        if (!is_q) {
            unallocated_encoding(s);
            return;
        }
        /* FRINTxx is only in the vector form */
        if (only_in_vector) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rmode >= 0 || need_fpst) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
    }

    if (rmode >= 0) {
        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
    }

    if (is_scalar) {
        TCGv_i32 tcg_op = read_fp_hreg(s, rn);
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);
    } else {
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
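/*
 * For the scalar FP16 cases above only the low 16 bits of tcg_res are
 * significant; the andi with 0xffff plus write_fp_sreg() store the
 * half-precision result zero-extended, clearing the remainder of the
 * destination vector register as the architecture requires.
 */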
/* AdvSIMD scalar x indexed element
 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * AdvSIMD vector x indexed element
 *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 */
static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
    /* This encoding has two kinds of instruction:
     *  normal, where we perform elt x idxelt => elt for each
     *     element in the vector
     *  long, where we perform elt x idxelt and generate a result of
     *     double the width of the input element
     * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
     */
    bool is_scalar = extract32(insn, 28, 1);
    bool is_q = extract32(insn, 30, 1);
    bool u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int l = extract32(insn, 21, 1);
    int m = extract32(insn, 20, 1);
    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
    int rm = extract32(insn, 16, 4);
    int opcode = extract32(insn, 12, 4);
    int h = extract32(insn, 11, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool is_long = false;
    int is_fp = 0;
    bool is_fp16 = false;
    int index;
    TCGv_ptr fpst;

    switch (16 * u + opcode) {
    case 0x08: /* MUL */
    case 0x10: /* MLA */
    case 0x14: /* MLS */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x02: /* SMLAL, SMLAL2 */
    case 0x12: /* UMLAL, UMLAL2 */
    case 0x06: /* SMLSL, SMLSL2 */
    case 0x16: /* UMLSL, UMLSL2 */
    case 0x0a: /* SMULL, SMULL2 */
    case 0x1a: /* UMULL, UMULL2 */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        is_long = true;
        break;
    case 0x03: /* SQDMLAL, SQDMLAL2 */
    case 0x07: /* SQDMLSL, SQDMLSL2 */
    case 0x0b: /* SQDMULL, SQDMULL2 */
        is_long = true;
        break;
    case 0x0c: /* SQDMULH */
    case 0x0d: /* SQRDMULH */
        break;
    case 0x01: /* FMLA */
    case 0x05: /* FMLS */
    case 0x09: /* FMUL */
    case 0x19: /* FMULX */
        is_fp = 1;
        break;
    case 0x1d: /* SQRDMLAH */
    case 0x1f: /* SQRDMLSH */
        if (!dc_isar_feature(aa64_rdm, s)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x0e: /* SDOT */
    case 0x1e: /* UDOT */
        if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x0f:
        switch (size) {
        case 0: /* SUDOT */
        case 2: /* USDOT */
            if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
                unallocated_encoding(s);
                return;
            }
            size = MO_32;
            break;
        case 1: /* BFDOT */
            if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
                unallocated_encoding(s);
                return;
            }
            size = MO_32;
            break;
        case 3: /* BFMLAL{B,T} */
            if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
                unallocated_encoding(s);
                return;
            }
            /* can't set is_fp without other incorrect size checks */
            size = MO_16;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
            unallocated_encoding(s);
            return;
        }
        is_fp = 2;
        break;
    case 0x00: /* FMLAL */
    case 0x04: /* FMLSL */
    case 0x18: /* FMLAL2 */
    case 0x1c: /* FMLSL2 */
        if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
            unallocated_encoding(s);
            return;
        }
        size = MO_16;
        /* is_fp, but we pass tcg_env not fp_status. */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    switch (is_fp) {
    case 1: /* normal fp */
        /* convert insn encoded size to MemOp size */
        switch (size) {
        case 0: /* half-precision */
            size = MO_16;
            is_fp16 = true;
            break;
        case MO_32: /* single precision */
        case MO_64: /* double precision */
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    case 2: /* complex fp */
        /* Each indexable element is a complex pair. */
        size += 1;
        switch (size) {
        case MO_32:
            if (h && !is_q) {
                unallocated_encoding(s);
                return;
            }
            is_fp16 = true;
            break;
        case MO_64:
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    default: /* integer */
        switch (size) {
        case MO_8:
        case MO_64:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    /* Given MemOp size, adjust register and indexing. */
    switch (size) {
    case MO_16:
        index = h << 2 | l << 1 | m;
        break;
    case MO_32:
        index = h << 1 | l;
        rm |= m << 4;
        break;
    case MO_64:
        if (l || !is_q) {
            unallocated_encoding(s);
            return;
        }
        index = h;
        rm |= m << 4;
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fp) {
        fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    } else {
        fpst = NULL;
    }

    switch (16 * u + opcode) {
    case 0x0e: /* SDOT */
    case 0x1e: /* UDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                         u ? gen_helper_gvec_udot_idx_b
                         : gen_helper_gvec_sdot_idx_b);
        return;
    case 0x0f:
        switch (extract32(insn, 22, 2)) {
        case 0: /* SUDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                             gen_helper_gvec_sudot_idx_b);
            return;
        case 1: /* BFDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                             gen_helper_gvec_bfdot_idx);
            return;
        case 2: /* USDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                             gen_helper_gvec_usdot_idx_b);
            return;
        case 3: /* BFMLAL{B,T} */
            gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
                              gen_helper_gvec_bfmlal_idx);
            return;
        }
        g_assert_not_reached();
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
    {
        int rot = extract32(insn, 13, 2);
        int data = (index << 2) | rot;
        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, rd), fpst,
                           is_q ? 16 : 8, vec_full_reg_size(s), data,
                           size == MO_64
                           ? gen_helper_gvec_fcmlas_idx
                           : gen_helper_gvec_fcmlah_idx);
    }
        return;

    case 0x00: /* FMLAL */
    case 0x04: /* FMLSL */
    case 0x18: /* FMLAL2 */
    case 0x1c: /* FMLSL2 */
    {
        int is_s = extract32(opcode, 2, 1);
        int is_2 = u;
        int data = (index << 2) | (is_2 << 1) | is_s;
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm), tcg_env,
                           is_q ? 16 : 8, vec_full_reg_size(s),
                           data, gen_helper_gvec_fmlal_idx_a64);
    }
        return;

    case 0x08: /* MUL */
        if (!is_long && !is_scalar) {
            static gen_helper_gvec_3 * const fns[3] = {
                gen_helper_gvec_mul_idx_h,
                gen_helper_gvec_mul_idx_s,
                gen_helper_gvec_mul_idx_d,
            };
            tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               index, fns[size - 1]);
            return;
        }
        break;

    case 0x10: /* MLA */
        if (!is_long && !is_scalar) {
            static gen_helper_gvec_4 * const fns[3] = {
                gen_helper_gvec_mla_idx_h,
                gen_helper_gvec_mla_idx_s,
                gen_helper_gvec_mla_idx_d,
            };
            tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               vec_full_reg_offset(s, rd),
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               index, fns[size - 1]);
            return;
        }
        break;

    case 0x14: /* MLS */
        if (!is_long && !is_scalar) {
            static gen_helper_gvec_4 * const fns[3] = {
                gen_helper_gvec_mls_idx_h,
                gen_helper_gvec_mls_idx_s,
                gen_helper_gvec_mls_idx_d,
            };
            tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               vec_full_reg_offset(s, rd),
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               index, fns[size - 1]);
            return;
        }
        break;
    }

    if (size == 3) {
        TCGv_i64 tcg_idx = tcg_temp_new_i64();
        int pass;

        assert(is_fp && is_q && !is_long);

        read_vec_element(s, tcg_idx, rm, index, MO_64);

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            switch (16 * u + opcode) {
            case 0x05: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op, tcg_op);
                /* fall through */
            case 0x01: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
                break;
            case 0x09: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            case 0x19: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }

        clear_vec_high(s, !is_scalar, rd);
    } else if (!is_long) {
        /* 32 bit floating point, or 16 or 32 bit integer.
         * For the 16 bit scalar case we use the usual Neon helpers and
         * rely on the fact that 0 op 0 == 0 with no side effects.
         */
        TCGv_i32 tcg_idx = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        read_vec_element_i32(s, tcg_idx, rm, index, size);

        if (size == 1 && !is_scalar) {
            /* The simplest way to handle the 16x16 indexed ops is to duplicate
             * the index into both halves of the 32 bit tcg_idx and then use
             * the usual Neon helpers.
             */
            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
        }

        for (pass = 0; pass < maxpasses; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);

            switch (16 * u + opcode) {
            case 0x08: /* MUL */
            case 0x10: /* MLA */
            case 0x14: /* MLS */
            {
                static NeonGenTwoOpFn * const fns[2][2] = {
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                NeonGenTwoOpFn *genfn;
                bool is_sub = opcode == 0x4;

                if (size == 1) {
                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
                } else {
                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
                }
                if (opcode == 0x8) {
                    break;
                }
                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
                genfn = fns[size - 1][is_sub];
                genfn(tcg_res, tcg_op, tcg_res);
                break;
            }
            case 0x05: /* FMLS */
            case 0x01: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                switch (size) {
                case 1:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for fused
                         * multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
                    }
                    if (is_scalar) {
                        gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
                                                   tcg_res, fpst);
                    } else {
                        gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
                                                    tcg_res, fpst);
                    }
                    break;
                case 2:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for
                         * fused multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
                    }
                    gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
                                           tcg_res, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x09: /* FMUL */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulh(tcg_res, tcg_op,
                                                tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mul2h(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x19: /* FMULX */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulxh(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
                                                  tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x0c: /* SQDMULH */
                if (size == 1) {
                    gen_helper_neon_qdmulh_s16(tcg_res, tcg_env,
                                               tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qdmulh_s32(tcg_res, tcg_env,
                                               tcg_op, tcg_idx);
                }
                break;
            case 0x0d: /* SQRDMULH */
                if (size == 1) {
                    gen_helper_neon_qrdmulh_s16(tcg_res, tcg_env,
                                                tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qrdmulh_s32(tcg_res, tcg_env,
                                                tcg_op, tcg_idx);
                }
                break;
            case 0x1d: /* SQRDMLAH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlah_s16(tcg_res, tcg_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlah_s32(tcg_res, tcg_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            case 0x1f: /* SQRDMLSH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlsh_s16(tcg_res, tcg_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlsh_s32(tcg_res, tcg_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }

        clear_vec_high(s, is_q, rd);
    } else {
        /* long ops: 16x16->32 or 32x32->64 */
        TCGv_i64 tcg_res[2];
        int pass;
        bool satop = extract32(opcode, 0, 1);
        MemOp memop = MO_32;

        if (satop || !u) {
            memop |= MO_SIGN;
        }

        if (size == 2) {
            TCGv_i64 tcg_idx = tcg_temp_new_i64();

            read_vec_element(s, tcg_idx, rm, index, memop);

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i64 tcg_op = tcg_temp_new_i64();
                TCGv_i64 tcg_passres;
                int passelt;

                if (is_scalar) {
                    passelt = 0;
                } else {
                    passelt = pass + (is_q * 2);
                }

                read_vec_element(s, tcg_op, rn, passelt, memop);

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);

                if (satop) {
                    /* saturating, doubling */
                    gen_helper_neon_addl_saturate_s64(tcg_passres, tcg_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], tcg_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            clear_vec_high(s, !is_scalar, rd);
        } else {
            TCGv_i32 tcg_idx = tcg_temp_new_i32();

            assert(size == 1);
            read_vec_element_i32(s, tcg_idx, rm, index, size);

            if (!is_scalar) {
                /* The simplest way to handle the 16x16 indexed ops is to
                 * duplicate the index into both halves of the 32 bit tcg_idx
                 * and then use the usual Neon helpers.
                 */
                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
            }

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i32 tcg_op = tcg_temp_new_i32();
                TCGv_i64 tcg_passres;

                if (is_scalar) {
                    read_vec_element_i32(s, tcg_op, rn, pass, size);
                } else {
                    read_vec_element_i32(s, tcg_op, rn,
                                         pass + (is_q * 2), MO_32);
                }

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                if (memop & MO_SIGN) {
                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
                }
                if (satop) {
                    gen_helper_neon_addl_saturate_s32(tcg_passres, tcg_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], tcg_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
            }
        }

        if (is_scalar) {
            tcg_res[1] = tcg_constant_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        }
    }
}
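/*
 * Index decode example (informal): for 16-bit elements the index is
 * H:L:M (0-7); for 32-bit elements M instead becomes the top bit of
 * Rm and the index is H:L (0-3), which is why the Rm field is only
 * 4 bits wide in this encoding group.
 */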
/* Crypto AES
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_aes(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    gen_helper_gvec_2 *genfn2 = NULL;
    gen_helper_gvec_3 *genfn3 = NULL;

    if (!dc_isar_feature(aa64_aes, s) || size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x4: /* AESE */
        genfn3 = gen_helper_crypto_aese;
        break;
    case 0x6: /* AESMC */
        genfn2 = gen_helper_crypto_aesmc;
        break;
    case 0x5: /* AESD */
        genfn3 = gen_helper_crypto_aesd;
        break;
    case 0x7: /* AESIMC */
        genfn2 = gen_helper_crypto_aesimc;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
    if (genfn2) {
        gen_gvec_op2_ool(s, true, rd, rn, 0, genfn2);
    } else {
        gen_gvec_op3_ool(s, true, rd, rd, rn, 0, genfn3);
    }
}
/* Crypto three-reg SHA
 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
 * +-----------------+------+---+------+---+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
 * +-----------------+------+---+------+---+--------+-----+------+------+
 */
static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 3);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    gen_helper_gvec_3 *genfn;
    bool feature;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1C */
        genfn = gen_helper_crypto_sha1c;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 1: /* SHA1P */
        genfn = gen_helper_crypto_sha1p;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 2: /* SHA1M */
        genfn = gen_helper_crypto_sha1m;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 3: /* SHA1SU0 */
        genfn = gen_helper_crypto_sha1su0;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 4: /* SHA256H */
        genfn = gen_helper_crypto_sha256h;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 5: /* SHA256H2 */
        genfn = gen_helper_crypto_sha256h2;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 6: /* SHA256SU1 */
        genfn = gen_helper_crypto_sha256su1;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
    gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
}
/* Crypto two-reg SHA
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    gen_helper_gvec_2 *genfn;
    bool feature;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1H */
        feature = dc_isar_feature(aa64_sha1, s);
        genfn = gen_helper_crypto_sha1h;
        break;
    case 1: /* SHA1SU1 */
        feature = dc_isar_feature(aa64_sha1, s);
        genfn = gen_helper_crypto_sha1su1;
        break;
    case 2: /* SHA256SU0 */
        feature = dc_isar_feature(aa64_sha256, s);
        genfn = gen_helper_crypto_sha256su0;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
    gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
}
static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
{
    tcg_gen_rotli_i64(d, m, 1);
    tcg_gen_xor_i64(d, d, n);
}

static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
{
    tcg_gen_rotli_vec(vece, d, m, 1);
    tcg_gen_xor_vec(vece, d, d, n);
}

void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3 op = {
        .fni8 = gen_rax1_i64,
        .fniv = gen_rax1_vec,
        .opt_opc = vecop_list,
        .fno = gen_helper_crypto_rax1,
        .vece = MO_64,
    };

    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
}
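/*
 * RAX1 is simply d = n ^ rol64(m, 1) per 64-bit lane (a SHA3 step);
 * the GVecGen3 above supplies inline i64 and vector expansions and
 * falls back to the out-of-line helper where rotli_vec is not
 * supported by the host backend.
 */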
/* Crypto three-reg SHA512
 *  31                   21 20  16 15  14  13 12  11 10 9    5 4    0
 * +-----------------------+------+---+---+-----+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+---+---+-----+--------+------+------+
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;
    gen_helper_gvec_3 *oolfn = NULL;
    GVecGen3Fn *gvecfn = NULL;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = dc_isar_feature(aa64_sha3, s);
            gvecfn = gen_gvec_rax1;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = dc_isar_feature(aa64_sm4, s);
            oolfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (oolfn) {
        gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
    } else {
        gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
    }
}
/* Crypto two-reg SHA512
 *  31                                     12  11  10  9    5 4    0
 * +-----------------------------------------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------------------------+--------+------+------+
 */
static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (opcode) {
    case 0: /* SHA512SU0 */
        feature = dc_isar_feature(aa64_sha512, s);
        break;
    case 1: /* SM4E */
        feature = dc_isar_feature(aa64_sm4, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0: /* SHA512SU0 */
        gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
        break;
    case 1: /* SM4E */
        gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
        break;
    default:
        g_assert_not_reached();
    }
}
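/*
 * Note that SM4E is destructive: rd is passed to gen_gvec_op3_ool as
 * both the destination and the first source operand, matching the
 * architectural definition in which Vd supplies the data transformed.
 */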
/* Crypto four-register
 *  31               23 22 21 20  16 15  14  10 9    5 4    0
 * +-------------------+-----+------+---+------+------+------+
 * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
 * +-------------------+-----+------+---+------+------+------+
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = dc_isar_feature(aa64_sha3, s);
        break;
    case 2: /* SM3SS1 */
        feature = dc_isar_feature(aa64_sm3, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3 */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    } else {
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_constant_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);
    }
}
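/*
 * For reference, the per-lane semantics implemented above are:
 *   EOR3:   Rd = Rn ^ Rm ^ Ra
 *   BCAX:   Rd = Rn ^ (Rm & ~Ra)
 *   SM3SS1: Vd.S[3] = ROL(ROL(Vn.S[3], 12) + Vm.S[3] + Va.S[3], 7),
 *           with the other three 32-bit lanes of Vd cleared.
 */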
/* Crypto XAR
 *  31                   21 20  16 15    10 9    5 4    0
 * +-----------------------+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
 * +-----------------------+------+--------+------+------+
 */
static void disas_crypto_xar(DisasContext *s, uint32_t insn)
{
    int rm = extract32(insn, 16, 5);
    int imm6 = extract32(insn, 10, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (!dc_isar_feature(aa64_sha3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
                 vec_full_reg_offset(s, rn),
                 vec_full_reg_offset(s, rm), imm6, 16,
                 vec_full_reg_size(s));
}
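/*
 * XAR computes Rd = ROR(Rn ^ Rm, imm6) on each 64-bit lane, with the
 * rotate amount taken straight from the imm6 field decoded above; with
 * imm6 == 0 it degenerates to a plain EOR of the two vectors.
 */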
/* Crypto three-reg imm2
 *  31                   21 20  16 15 14 13 12 11 10  9    5 4    0
 * +-----------------------+------+-----+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+-----+------+--------+------+------+
 */
static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
        gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
    };
    int opcode = extract32(insn, 10, 2);
    int imm2 = extract32(insn, 12, 2);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (!dc_isar_feature(aa64_sm3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
}
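/*
 * For the SM3TT helpers the imm2 field, forwarded as the helper's data
 * argument above, selects which 32-bit element of Vm participates in
 * the transformation (Vm.S[imm2] in the assembler syntax).
 */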
/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL }
};
static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}
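/*
 * lookup_disas_fn scans the table linearly and returns the first entry
 * for which (insn & mask) == pattern, or NULL if nothing matches.  For
 * example, a XAR encoding such as 0xce800400 fails the four_reg and
 * SHA512 entries but matches { 0xce800000, 0xffe00000 }, selecting
 * disas_crypto_xar.
 */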
/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}
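/*
 * Bit 28 set with bit 30 clear selects the scalar floating point
 * encodings; every other combination in this space falls through to
 * the SIMD (including crypto) table above.
 */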
static bool trans_OK(DisasContext *s, arg_OK *a)
{
    return true;
}

static bool trans_FAIL(DisasContext *s, arg_OK *a)
{
    s->is_nonstreaming = true;
    return true;
}
/**
 * is_guarded_page:
 * @env: The cpu environment
 * @s: The DisasContext
 *
 * Return true if the page is guarded.
 */
static bool is_guarded_page(CPUARMState *env, DisasContext *s)
{
    uint64_t addr = s->base.pc_first;
#ifdef CONFIG_USER_ONLY
    return page_get_flags(addr) & PAGE_BTI;
#else
    CPUTLBEntryFull *full;
    void *host;
    int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
    int flags;

    /*
     * We test this immediately after reading an insn, which means
     * that the TLB entry must be present and valid, and thus this
     * access will never raise an exception.
     */
    flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
                              false, &host, &full, 0);
    assert(!(flags & TLB_INVALID_MASK));

    return full->extra.arm.guarded;
#endif
}
/**
 * btype_destination_ok:
 * @insn: The instruction at the branch destination
 * @bt: SCTLR_ELx.BT
 * @btype: PSTATE.BTYPE, and is non-zero
 *
 * On a guarded page, there are a limited number of insns
 * that may be present at the branch target:
 *  - branch target identifiers,
 *  - paciasp, pacibsp,
 *  - BRK insn,
 *  - HLT insn.
 * Anything else causes a Branch Target Exception.
 *
 * Return true if the branch is compatible, false to raise BTITRAP.
 */
static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
{
    if ((insn & 0xfffff01fu) == 0xd503201fu) {
        /* HINT space */
        switch (extract32(insn, 5, 7)) {
        case 0b011001: /* PACIASP */
        case 0b011011: /* PACIBSP */
            /*
             * If SCTLR_ELx.BT, then PACI*SP are not compatible
             * with btype == 3.  Otherwise all btype are ok.
             */
            return !bt || btype != 3;
        case 0b100000: /* BTI */
            /* Not compatible with any btype. */
            return false;
        case 0b100010: /* BTI c */
            /* Not compatible with btype == 3 */
            return btype != 3;
        case 0b100100: /* BTI j */
            /* Not compatible with btype == 2 */
            return btype != 2;
        case 0b100110: /* BTI jc */
            /* Compatible with any btype. */
            return true;
        }
    } else {
        switch (insn & 0xffe0001fu) {
        case 0xd4200000u: /* BRK */
        case 0xd4400000u: /* HLT */
            /* Give priority to the breakpoint exception. */
            return true;
        }
    }
    return false;
}
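/*
 * Worked example: BTI c is HINT #34, i.e. 0xd503245f.  It passes the
 * HINT space test above, decodes as 0b100010, and is therefore
 * accepted for btype 1 and 2 but raises BTITRAP for btype 3.
 */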
/* C3.1 A64 instruction index by encoding */
static void disas_a64_legacy(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 4)) {
    case 0x5:
    case 0xd:      /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf:      /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu_env(cpu);
    ARMCPU *arm_cpu = env_archcpu(env);
    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
    int bound, core_mmu_idx;

    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;
    dc->pc_save = dc->base.pc_first;
    dc->aarch64 = true;
    dc->thumb = false;
    dc->sctlr_b = 0;
    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
    dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
    dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
    dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
    dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
    dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
    dc->trap_eret = EX_TBFLAG_A64(tb_flags, TRAP_ERET);
    dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
    dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
    dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
    dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
    dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
    dc->ata[0] = EX_TBFLAG_A64(tb_flags, ATA);
    dc->ata[1] = EX_TBFLAG_A64(tb_flags, ATA0);
    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
    dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
    dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
    dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
    dc->naa = EX_TBFLAG_A64(tb_flags, NAA);
    dc->nv = EX_TBFLAG_A64(tb_flags, NV);
    dc->nv1 = EX_TBFLAG_A64(tb_flags, NV1);
    dc->nv2 = EX_TBFLAG_A64(tb_flags, NV2);
    dc->nv2_mem_e20 = EX_TBFLAG_A64(tb_flags, NV2_MEM_E20);
    dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;
    dc->dcz_blocksize = arm_cpu->dcz_blocksize;
    dc->gm_blocksize = arm_cpu->gm_blocksize;

#ifdef CONFIG_USER_ONLY
    /* In sve_probe_page, we assume TBI is enabled. */
    tcg_debug_assert(dc->tbid & 1);
#endif

    dc->lse2 = dc_isar_feature(aa64_lse2, dc);

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
    dc->is_ldex = false;

    /* Bound the number of insns to execute to those left on the page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;

    /* If architectural single step active, limit to 1. */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
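/*
 * Worked example for the page-bound computation above: with 4KiB pages
 * (TARGET_PAGE_MASK == ~0xfff) and pc_first ending in 0xff8,
 * (pc_first | TARGET_PAGE_MASK) is -8, so bound is 2: exactly the two
 * remaining 4-byte insn slots before the page boundary.
 */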
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, 0, 0);
    dc->insn_start = tcg_last_op();
}
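/*
 * With CF_PCREL the generated code is position independent across
 * pages, so only the offset within the page is recorded here; the
 * restore_state_to_opc hook can rebuild the full PC from the page base
 * of the current CPU state.
 */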
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu_env(cpu);
    uint64_t pc = s->base.pc_next;
    uint32_t insn;

    /* Singlestep exceptions have the highest priority. */
    if (s->ss_active && !s->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(s->base.num_insns == 1);
        gen_swstep_exception(s, 0, 0);
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = pc + 4;
        return;
    }

    if (pc & 3) {
        /*
         * PC alignment fault.  This has priority over the instruction abort
         * that we would receive from a translation fault via arm_ldl_code.
         * This should only be possible after an indirect branch, at the
         * start of the TB.
         */
        assert(s->base.num_insns == 1);
        gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
        return;
    }

    s->pc_curr = pc;
    insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
    s->insn = insn;
    s->base.pc_next = pc + 4;

    s->fp_access_checked = false;
    s->sve_access_checked = false;

    if (s->pstate_il) {
        /*
         * Illegal execution state. This has priority over BTI
         * exceptions, but comes after instruction abort exceptions.
         */
        gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
        return;
    }

    if (dc_isar_feature(aa64_bti, s)) {
        if (s->base.num_insns == 1) {
            /*
             * At the first insn of the TB, compute s->guarded_page.
             * We delayed computing this until successfully reading
             * the first insn of the TB, above.  This (mostly) ensures
             * that the softmmu tlb entry has been populated, and the
             * page table GP bit is available.
             *
             * Note that we need to compute this even if btype == 0,
             * because this value is used for BR instructions later
             * where ENV is not available.
             */
            s->guarded_page = is_guarded_page(env, s);

            /* First insn can have btype set to non-zero. */
            tcg_debug_assert(s->btype >= 0);

            /*
             * Note that the Branch Target Exception has fairly high
             * priority -- below debugging exceptions but above most
             * everything else.  This allows us to handle this now
             * instead of waiting until the insn is otherwise decoded.
             */
            if (s->btype != 0
                && s->guarded_page
                && !btype_destination_ok(insn, s->bt, s->btype)) {
                gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype));
                return;
            }
        } else {
            /* Not the first insn: btype must be 0. */
            tcg_debug_assert(s->btype == 0);
        }
    }

    s->is_nonstreaming = false;
    if (s->sme_trap_nonstreaming) {
        disas_sme_fa64(s, insn);
    }

    if (!disas_a64(s, insn) &&
        !disas_sme(s, insn) &&
        !disas_sve(s, insn)) {
        disas_a64_legacy(s, insn);
    }

    /*
     * After execution of most insns, btype is reset to 0.
     * Note that we set btype == -1 when the insn sets btype.
     */
    if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
        reset_btype(s);
    }
}
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ss_active)) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            gen_step_complete_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, 4);
            break;
        default:
        case DISAS_UPDATE_EXIT:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_update_pc(dc, 4);
            gen_helper_wfe(tcg_env);
            break;
        case DISAS_YIELD:
            gen_a64_update_pc(dc, 4);
            gen_helper_yield(tcg_env);
            break;
        case DISAS_WFI:
            /*
             * This is a special case because we don't want to just halt
             * the CPU if trying to debug across a WFI.
             */
            gen_a64_update_pc(dc, 4);
            gen_helper_wfi(tcg_env, tcg_constant_i32(4));
            /*
             * The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
    }
}
static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
    target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
}
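/*
 * The generic translator_loop() drives the hooks below: init_disas_context
 * and tb_start once per TB, then insn_start and translate_insn for each
 * instruction until the TB ends, and finally tb_stop.  disas_log is only
 * invoked when TB input logging is enabled.
 */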
const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,