/*
 * AArch64 translation
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "translate.h"
#include "translate-a64.h"
#include "disas/disas.h"
#include "semihosting/semihost.h"

static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/*
 * Helpers for extracting complex instruction fields
 */

/*
 * For load/store with an unsigned 12 bit immediate scaled by the element
 * size. The input has the immediate field in bits [14:3] and the element
 * size in [2:0].
 */
static int uimm_scaled(DisasContext *s, int x)
{
    unsigned imm = x >> 3;
    unsigned scale = extract32(x, 0, 3);
    return imm << scale;
}
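/*
 * Worked example (illustrative, not from the original source): a 64-bit
 * LDR uses scale == 3, so an input of x == (0x123 << 3) | 3 decodes to
 * imm == 0x123 and a byte offset of imm << scale == 0x918.
 */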
/* For load/store memory tags: scale offset by LOG2_TAG_GRANULE */
static int scale_by_log2_tag_granule(DisasContext *s, int x)
{
    return x << LOG2_TAG_GRANULE;
}

/*
 * Include the generated decoders.
 */

#include "decode-sme-fa64.c.inc"
#include "decode-a64.c.inc"

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;

/* initialize TCG globals.  */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(tcg_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(tcg_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(tcg_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}

/*
 * Return the core mmu_idx to use for A64 load/store insns which
 * have an "unprivileged load/store" variant. Those insns access
 * EL0 if executed from an EL which has control over EL0 (usually
 * EL1) but behave like normal loads and stores if executed from
 * elsewhere (eg EL3).
 *
 * @unpriv : true for the unprivileged encoding; false for the
 *           normal encoding (in which case we will return the same
 *           thing as get_mem_index()).
 */
static int get_a64_user_mem_index(DisasContext *s, bool unpriv)
{
    /*
     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
     * which is the usual mmu_idx for this cpu state.
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (unpriv && s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}
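/*
 * For example, the LDTR/STTR family (the "unprivileged" loads and
 * stores) passes unpriv == true here; everything else passes false and
 * gets the ordinary mmu_idx for the current translation state.
 */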
static void set_btype_raw(int val)
{
    tcg_gen_st_i32(tcg_constant_i32(val), tcg_env,
                   offsetof(CPUARMState, btype));
}

static void set_btype(DisasContext *s, int val)
{
    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype.  */
    tcg_debug_assert(val >= 1 && val <= 3);
    set_btype_raw(val);
    s->btype = -1;
}

static void reset_btype(DisasContext *s)
{
    if (s->btype != 0) {
        set_btype_raw(0);
        s->btype = 0;
    }
}

static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
    } else {
        tcg_gen_movi_i64(dest, s->pc_curr + diff);
    }
}

void gen_a64_update_pc(DisasContext *s, target_long diff)
{
    gen_pc_plus_diff(s, cpu_pc, diff);
    s->pc_save = s->pc_curr + diff;
}

/*
 * Handle Top Byte Ignore (TBI) bits.
 *
 * If address tagging is enabled via the TCR TBI bits:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * Here we have concatenated TBI{1,0} into tbi.
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55.  */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        switch (tbi) {
        case 1:
            /* tbi0 but !tbi1: only use the extension if positive */
            tcg_gen_and_i64(dst, dst, src);
            break;
        case 2:
            /* !tbi0 but tbi1: only use the extension if negative */
            tcg_gen_or_i64(dst, dst, src);
            break;
        case 3:
            /* tbi0 and tbi1: always use the extension */
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI bits,
     * then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
    s->pc_save = -1;
}

/*
 * Handle MTE and/or TBI.
 *
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode we do not have a TLB with which to implement this, so we must
 * remove the top byte now.
 *
 * Always return a fresh temporary that we can increment independently
 * of the write-back address.
 */
TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = tcg_temp_new_i64();
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}

/* Insert a zero tag into src, with the result at dst. */
static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
}
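/*
 * MAKE_64BIT_MASK(56, 4) covers bits [59:56], which hold the MTE
 * allocation tag; ANDing with its complement writes tag 0 while leaving
 * bits [63:60] of the top byte and the 56-bit address untouched.
 */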
static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
                             MMUAccessType acc, int log2_size)
{
    gen_helper_probe_access(tcg_env, ptr,
                            tcg_constant_i32(acc),
                            tcg_constant_i32(get_mem_index(s)),
                            tcg_constant_i32(1 << log2_size));
}

/*
 * For MTE, check a single logical or atomic access.  This probes a single
 * address, the exact one specified.  The size and alignment of the access
 * is not relevant to MTE, per se, but watchpoints do require the size,
 * and we want to recognize those before making any other changes to state.
 */
static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
                                      bool is_write, bool tag_checked,
                                      MemOp memop, bool is_unpriv,
                                      int core_idx)
{
    if (tag_checked && s->mte_active[is_unpriv]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop));
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, tcg_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
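/*
 * The desc value built above is a packed MTEDESC descriptor: mmu index,
 * TBI/TCMA state, access direction, alignment and size all travel to
 * gen_helper_mte_check() in a single i32 constant.
 */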
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, MemOp memop)
{
    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, memop,
                                 false, get_mem_index(s));
}

/*
 * For MTE, check multiple logical sequential accesses.
 */
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int total_size, MemOp single_mop)
{
    if (tag_checked && s->mte_active[0]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop));
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, tcg_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}

/*
 * Generate the special alignment check that applies to AccType_ATOMIC
 * and AccType_ORDERED insns under FEAT_LSE2: the access need not be
 * naturally aligned, but it must not cross a 16-byte boundary.
 * See AArch64.CheckAlignment().
 */
static void check_lse2_align(DisasContext *s, int rn, int imm,
                             bool is_write, MemOp mop)
{
    TCGv_i32 tmp;
    TCGv_i64 addr;
    TCGLabel *over_label;
    MMUAccessType type;
    int mmu_idx;

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg_sp(s, rn));
    tcg_gen_addi_i32(tmp, tmp, imm & 15);
    tcg_gen_andi_i32(tmp, tmp, 15);
    tcg_gen_addi_i32(tmp, tmp, memop_size(mop));

    over_label = gen_new_label();
    tcg_gen_brcondi_i32(TCG_COND_LEU, tmp, 16, over_label);

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm);

    type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
    mmu_idx = get_mem_index(s);
    gen_helper_unaligned_access(tcg_env, addr, tcg_constant_i32(type),
                                tcg_constant_i32(mmu_idx));

    gen_set_label(over_label);
}
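/*
 * The arithmetic above leaves ((rn + imm) mod 16) + access_size in tmp;
 * a value of at most 16 means the access stays within one 16-byte
 * granule, so the branch skips the unaligned-access helper.
 */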
/* Handle the alignment check for AccType_ATOMIC instructions. */
static MemOp check_atomic_align(DisasContext *s, int rn, MemOp mop)
{
    MemOp size = mop & MO_SIZE;

    if (size == MO_8) {
        return mop;
    }

    /*
     * If size == MO_128, this is a LDXP, and the operation is single-copy
     * atomic for each doubleword, not the entire quadword; it still must
     * be quadword aligned.
     */
    if (size == MO_128) {
        return finalize_memop_atom(s, MO_128 | MO_ALIGN,
                                   MO_ATOM_IFALIGN_PAIR);
    }
    if (dc_isar_feature(aa64_lse2, s)) {
        check_lse2_align(s, rn, 0, true, mop);
    } else {
        mop |= MO_ALIGN;
    }
    return finalize_memop(s, mop);
}

/* Handle the alignment check for AccType_ORDERED instructions. */
static MemOp check_ordered_align(DisasContext *s, int rn, int imm,
                                 bool is_write, MemOp mop)
{
    MemOp size = mop & MO_SIZE;

    if (size == MO_8) {
        return mop;
    }
    if (size == MO_128) {
        return finalize_memop_atom(s, MO_128 | MO_ALIGN,
                                   MO_ATOM_IFALIGN_PAIR);
    }
    if (!dc_isar_feature(aa64_lse2, s)) {
        mop |= MO_ALIGN;
    } else if (!s->naa) {
        check_lse2_align(s, rn, imm, is_write, mop);
    }
    return finalize_memop(s, mop);
}

typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /*
     * Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly.  The NE/EQ comparisons are also fine with this choice.
     */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);
}

static void gen_rebuild_hflags(DisasContext *s)
{
    gen_helper_rebuild_hflags_a64(tcg_env, tcg_constant_i32(s->current_el));
}

static void gen_exception_internal(int excp)
{
    assert(excp_is_internal(excp));
    gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
}

static void gen_exception_internal_insn(DisasContext *s, int excp)
{
    gen_a64_update_pc(s, 0);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    gen_a64_update_pc(s, 0);
    gen_helper_exception_bkpt_insn(tcg_env, tcg_constant_i32(syndrome));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (s->ss_active) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
{
    if (use_goto_tb(s, s->pc_curr + diff)) {
        /*
         * For pcrel, the pc must always be up-to-date on entry to
         * the linked TB, so that it can use simple additions for all
         * further adjustments.  For !pcrel, the linked TB is compiled
         * to know its full virtual address, so we can delay the
         * update to pc to the unlinked path.  A long chain of links
         * can thus avoid many updates to the PC.
         */
        if (tb_cflags(s->base.tb) & CF_PCREL) {
            gen_a64_update_pc(s, diff);
            tcg_gen_goto_tb(n);
        } else {
            tcg_gen_goto_tb(n);
            gen_a64_update_pc(s, diff);
        }
        tcg_gen_exit_tb(s->base.tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_update_pc(s, diff);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}

/*
 * Register access functions
 *
 * These functions are used for directly accessing a register in cases
 * where changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_movi_i64(t, 0);
        return t;
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();

    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();

    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}

/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}

/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, tcg_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, tcg_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, tcg_env, fp_reg_offset(s, reg, MO_16));
    return v;
}

/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop move, with side effect of clearing the tail. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}

void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, tcg_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
}

/* Expand a 2-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 4-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand operation using an out-of-line helper. */
static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int data, gen_helper_gvec_2 *fn)
{
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand operation using an out-of-line helper. */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + qc + operation using an out-of-line helper. */
static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
                            int rm, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr qc_ptr = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(qc_ptr, tcg_env, offsetof(CPUARMState, vfp.qc));
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), qc_ptr,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
}

/* Expand a 4-operand operation using an out-of-line helper. */
static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
                             int rm, int ra, int data, gen_helper_gvec_4 *fn)
{
    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/*
 * Expand a 4-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, int ra, bool is_fp16, int data,
                              gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
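/*
 * Recall QEMU's flag representation: Z is considered set when cpu_ZF
 * == 0, and N is bit 31 of cpu_NF.  Splitting the 64-bit result into
 * its two halves and ORing them into cpu_ZF makes cpu_ZF zero exactly
 * when the result is zero, while the high half lands in cpu_NF with
 * the sign bit in bit 31.
 */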
/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 result, flag, tmp;
    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    tcg_gen_movi_i64(tmp, 0);
    tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    gen_set_NZ64(result);

    tcg_gen_xor_i64(flag, result, t0);
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_andc_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);

    tcg_gen_mov_i64(dest, result);
}
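/*
 * The overflow computation above is the usual trick: V = (result ^ t0)
 * & ~(t0 ^ t1), i.e. the operands agreed in sign but the result
 * differs; tcg_gen_extrh_i64_i32 then leaves that sign bit (bit 63)
 * in bit 31 of cpu_VF, where QEMU expects V.
 */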
static void gen_add32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_add64_CC(dest, t0, t1);
    } else {
        gen_add32_CC(dest, t0, t1);
    }
}

/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 64 bit arithmetic */
    TCGv_i64 result, flag, tmp;

    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tcg_gen_sub_i64(result, t0, t1);

    gen_set_NZ64(result);

    tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    tcg_gen_xor_i64(flag, result, t0);
    tmp = tcg_temp_new_i64();
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_and_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);
    tcg_gen_mov_i64(dest, result);
}

static void gen_sub32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 32 bit arithmetic */
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp;

    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_sub64_CC(dest, t0, t1);
    } else {
        gen_sub32_CC(dest, t0, t1);
    }
}

/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}

/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result = tcg_temp_new_i64();
        TCGv_i64 cf_64 = tcg_temp_new_i64();
        TCGv_i64 vf_64 = tcg_temp_new_i64();
        TCGv_i64 tmp = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_constant_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);
    } else {
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();
        TCGv_i32 zero = tcg_constant_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
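/*
 * Note the use of tcg_gen_add2_*: each call produces a double-width
 * (low, high) result pair, so the carry out of the low word accumulates
 * in cf_64/cpu_CF across the two additions of t0, CF and t1.
 */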
/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, MemOp memop, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, MemOp memop,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                             MemOp memop, bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && (memop & MO_SIGN)) {
        g_assert((memop & MO_SIZE) <= MO_32);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      (memop & MO_SIGN) != 0,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      MemOp memop, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, MemOp mop)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmplo = tcg_temp_new_i64();

    tcg_gen_ld_i64(tmplo, tcg_env, fp_reg_offset(s, srcidx, MO_64));

    if ((mop & MO_SIZE) < MO_128) {
        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        TCGv_i64 tmphi = tcg_temp_new_i64();
        TCGv_i128 t16 = tcg_temp_new_i128();

        tcg_gen_ld_i64(tmphi, tcg_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_concat_i64_i128(t16, tmplo, tmphi);

        tcg_gen_qemu_st_i128(t16, tcg_addr, get_mem_index(s), mop);
    }
}

/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, MemOp mop)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi = NULL;

    if ((mop & MO_SIZE) < MO_128) {
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        TCGv_i128 t16 = tcg_temp_new_i128();

        tcg_gen_qemu_ld_i128(t16, tcg_addr, get_mem_index(s), mop);

        tmphi = tcg_temp_new_i64();
        tcg_gen_extr_i128_i64(tmplo, tmphi, t16);
    }

    tcg_gen_st_i64(tmplo, tcg_env, fp_reg_offset(s, destidx, MO_64));

    if (tmphi) {
        tcg_gen_st_i64(tmphi, tcg_env, fp_reg_hi_offset(s, destidx));
    }
    clear_vec_high(s, tmphi != NULL, destidx);
}

/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch ((unsigned)memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
    write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
}

/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static bool fp_access_check_only(DisasContext *s)
{
    if (s->fp_excp_el) {
        assert(!s->fp_access_checked);
        s->fp_access_checked = true;

        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_fp_access_trap(1, 0xe, false, 0),
                              s->fp_excp_el);
        return false;
    }
    s->fp_access_checked = true;
    return true;
}

static bool fp_access_check(DisasContext *s)
{
    if (!fp_access_check_only(s)) {
        return false;
    }
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
        return false;
    }
    return true;
}

/*
 * Check that SVE access is enabled. If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 * This function corresponds to CheckSVEEnabled().
 */
bool sve_access_check(DisasContext *s)
{
    if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
        assert(dc_isar_feature(aa64_sme, s));
        if (!sme_sm_enabled_check(s)) {
            goto fail_exit;
        }
    } else if (s->sve_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_sve_access_trap(), s->sve_excp_el);
        goto fail_exit;
    }
    s->sve_access_checked = true;
    return fp_access_check(s);

 fail_exit:
    /* Assert that we only raise one exception per instruction. */
    assert(!s->sve_access_checked);
    s->sve_access_checked = true;
    return false;
}

/*
 * Check that SME access is enabled, raise an exception if not.
 * Note that this function corresponds to CheckSMEAccess and is
 * only used directly for cpregs.
 */
static bool sme_access_check(DisasContext *s)
{
    if (s->sme_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_smetrap(SME_ET_AccessTrap, false),
                              s->sme_excp_el);
        return false;
    }
    return true;
}

/* This function corresponds to CheckSMEEnabled. */
bool sme_enabled_check(DisasContext *s)
{
    /*
     * Note that unlike sve_excp_el, we have not constrained sme_excp_el
     * to be zero when fp_excp_el has priority.  This is because we need
     * sme_excp_el by itself for cpregs access checks.
     */
    if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
        s->fp_access_checked = true;
        return sme_access_check(s);
    }
    return fp_access_check_only(s);
}

/* Common subroutine for CheckSMEAnd*Enabled. */
bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
{
    if (!sme_enabled_check(s)) {
        return false;
    }
    if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_NotStreaming, false));
        return false;
    }
    if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_InactiveZA, false));
        return false;
    }
    return true;
}

/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}

static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * This provides a simple table based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0).
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}
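/*
 * Usage sketch (illustrative only; the table and handler names below
 * are hypothetical, not taken from this file):
 *
 *   static const AArch64DecodeTable example_table[] = {
 *       { 0x0e200400, 0x9f200400, disas_example_group },
 *       { 0x00000000, 0x00000000, NULL }
 *   };
 *
 *   AArch64DecodeFn *fn = lookup_disas_fn(example_table, insn);
 *   if (fn) {
 *       fn(s, insn);
 *   }
 */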
/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */

static bool trans_B(DisasContext *s, arg_i *a)
{
    reset_btype(s);
    gen_goto_tb(s, 0, a->imm);
    return true;
}

static bool trans_BL(DisasContext *s, arg_i *a)
{
    gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
    reset_btype(s);
    gen_goto_tb(s, 0, a->imm);
    return true;
}

static bool trans_CBZ(DisasContext *s, arg_cbz *a)
{
    TCGv_i64 tcg_cmp;
    DisasLabel match;

    tcg_cmp = read_cpu_reg(s, a->rt, a->sf);
    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, a->imm);
    return true;
}

static bool trans_TBZ(DisasContext *s, arg_tbz *a)
{
    TCGv_i64 tcg_cmp;
    DisasLabel match;

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, a->rt), 1ULL << a->bitpos);

    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, a->imm);
    return true;
}

static bool trans_B_cond(DisasContext *s, arg_B_cond *a)
{
    /* BC.cond is only present with FEAT_HBC */
    if (a->c && !dc_isar_feature(aa64_hbc, s)) {
        return false;
    }
    reset_btype(s);
    if (a->cond < 0x0e) {
        /* genuinely conditional branches */
        DisasLabel match = gen_disas_label(s);
        arm_gen_test_cc(a->cond, match.label);
        gen_goto_tb(s, 0, 4);
        set_disas_label(s, match);
        gen_goto_tb(s, 1, a->imm);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, a->imm);
    }
    return true;
}

static void set_btype_for_br(DisasContext *s, int rn)
{
    if (dc_isar_feature(aa64_bti, s)) {
        /* BR to {x16,x17} or !guard -> 1, else 3.  */
        set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
    }
}

static void set_btype_for_blr(DisasContext *s)
{
    if (dc_isar_feature(aa64_bti, s)) {
        /* BLR sets BTYPE to 2, regardless of source guarded page.  */
        set_btype(s, 2);
    }
}

static bool trans_BR(DisasContext *s, arg_r *a)
{
    gen_a64_set_pc(s, cpu_reg(s, a->rn));
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLR(DisasContext *s, arg_r *a)
{
    TCGv_i64 dst = cpu_reg(s, a->rn);
    TCGv_i64 lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_RET(DisasContext *s, arg_r *a)
{
    gen_a64_set_pc(s, cpu_reg(s, a->rn));
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static TCGv_i64 auth_branch_target(DisasContext *s, TCGv_i64 dst,
                                   TCGv_i64 modifier, bool use_key_a)
{
    TCGv_i64 truedst;
    /*
     * Return the branch target for a BRAA/RETA/etc, which is either
     * just the destination dst, or that value with the pauth check
     * done and the code removed from the high bits.
     */
    if (!s->pauth_active) {
        return dst;
    }

    truedst = tcg_temp_new_i64();
    if (use_key_a) {
        gen_helper_autia_combined(truedst, tcg_env, dst, modifier);
    } else {
        gen_helper_autib_combined(truedst, tcg_env, dst, modifier);
    }
    return truedst;
}

static bool trans_BRAZ(DisasContext *s, arg_braz *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }

    dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
    gen_a64_set_pc(s, dst);
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLRAZ(DisasContext *s, arg_braz *a)
{
    TCGv_i64 dst, lr;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }

    dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
    lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_RETA(DisasContext *s, arg_reta *a)
{
    TCGv_i64 dst;

    dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
    gen_a64_set_pc(s, dst);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BRA(DisasContext *s, arg_bra *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
    gen_a64_set_pc(s, dst);
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLRA(DisasContext *s, arg_bra *a)
{
    TCGv_i64 dst, lr;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
    lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_ERET(DisasContext *s, arg_ERET *a)
{
    TCGv_i64 dst;

    if (s->current_el == 0) {
        return false;
    }
    if (s->fgt_eret) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(0), 2);
        return true;
    }
    dst = tcg_temp_new_i64();
    tcg_gen_ld_i64(dst, tcg_env,
                   offsetof(CPUARMState, elr_el[s->current_el]));

    translator_io_start(&s->base);

    gen_helper_exception_return(tcg_env, dst);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_ERETA(DisasContext *s, arg_reta *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }
    /* The FGT trap takes precedence over an auth trap. */
    if (s->fgt_eret) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(a->m ? 3 : 2), 2);
        return true;
    }
    dst = tcg_temp_new_i64();
    tcg_gen_ld_i64(dst, tcg_env,
                   offsetof(CPUARMState, elr_el[s->current_el]));

    dst = auth_branch_target(s, dst, cpu_X[31], !a->m);

    translator_io_start(&s->base);

    gen_helper_exception_return(tcg_env, dst);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_NOP(DisasContext *s, arg_NOP *a)
{
    return true;
}

static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
{
    /*
     * When running in MTTCG we don't generate jumps to the yield and
     * WFE helpers as it won't affect the scheduling of other vCPUs.
     * If we wanted to more completely model WFE/SEV so we don't busy
     * spin unnecessarily we would need to do something more involved.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        s->base.is_jmp = DISAS_YIELD;
    }
    return true;
}

static bool trans_WFI(DisasContext *s, arg_WFI *a)
{
    s->base.is_jmp = DISAS_WFI;
    return true;
}

static bool trans_WFE(DisasContext *s, arg_WFI *a)
{
    /*
     * When running in MTTCG we don't generate jumps to the yield and
     * WFE helpers as it won't affect the scheduling of other vCPUs.
     * If we wanted to more completely model WFE/SEV so we don't busy
     * spin unnecessarily we would need to do something more involved.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        s->base.is_jmp = DISAS_WFE;
    }
    return true;
}

static bool trans_XPACLRI(DisasContext *s, arg_XPACLRI *a)
{
    if (s->pauth_active) {
        gen_helper_xpaci(cpu_X[30], tcg_env, cpu_X[30]);
    }
    return true;
}

static bool trans_PACIA1716(DisasContext *s, arg_PACIA1716 *a)
{
    if (s->pauth_active) {
        gen_helper_pacia(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_PACIB1716(DisasContext *s, arg_PACIB1716 *a)
{
    if (s->pauth_active) {
        gen_helper_pacib(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_AUTIA1716(DisasContext *s, arg_AUTIA1716 *a)
{
    if (s->pauth_active) {
        gen_helper_autia(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_AUTIB1716(DisasContext *s, arg_AUTIB1716 *a)
{
    if (s->pauth_active) {
        gen_helper_autib(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_ESB(DisasContext *s, arg_ESB *a)
{
    /* Without RAS, we must implement this as NOP. */
    if (dc_isar_feature(aa64_ras, s)) {
        /*
         * QEMU does not have a source of physical SErrors,
         * so we are only concerned with virtual SErrors.
         * The pseudocode in the ARM for this case is
         *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
         *      AArch64.vESBOperation();
         * Most of the condition can be evaluated at translation time.
         * Test for EL2 present, and defer test for SEL2 to runtime.
         */
        if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
            gen_helper_vesb(tcg_env);
        }
    }
    return true;
}

static bool trans_PACIAZ(DisasContext *s, arg_PACIAZ *a)
{
    if (s->pauth_active) {
        gen_helper_pacia(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_PACIASP(DisasContext *s, arg_PACIASP *a)
{
    if (s->pauth_active) {
        gen_helper_pacia(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_PACIBZ(DisasContext *s, arg_PACIBZ *a)
{
    if (s->pauth_active) {
        gen_helper_pacib(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_PACIBSP(DisasContext *s, arg_PACIBSP *a)
{
    if (s->pauth_active) {
        gen_helper_pacib(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_AUTIAZ(DisasContext *s, arg_AUTIAZ *a)
{
    if (s->pauth_active) {
        gen_helper_autia(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_AUTIASP(DisasContext *s, arg_AUTIASP *a)
{
    if (s->pauth_active) {
        gen_helper_autia(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_AUTIBZ(DisasContext *s, arg_AUTIBZ *a)
{
    if (s->pauth_active) {
        gen_helper_autib(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_AUTIBSP(DisasContext *s, arg_AUTIBSP *a)
{
    if (s->pauth_active) {
        gen_helper_autib(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
    return true;
}

static bool trans_DSB_DMB(DisasContext *s, arg_DSB_DMB *a)
{
    /* We handle DSB and DMB the same way */
    TCGBar bar;

    switch (a->types) {
    case 1: /* MBReqTypes_Reads */
        bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
        break;
    case 2: /* MBReqTypes_Writes */
        bar = TCG_BAR_SC | TCG_MO_ST_ST;
        break;
    default: /* MBReqTypes_All */
        bar = TCG_BAR_SC | TCG_MO_ALL;
        break;
    }
    tcg_gen_mb(bar);
    return true;
}

static bool trans_ISB(DisasContext *s, arg_ISB *a)
{
    /*
     * We need to break the TB after this insn to execute
     * self-modifying code correctly and also to take
     * any pending interrupts immediately.
     */
    reset_btype(s);
    gen_goto_tb(s, 0, 4);
    return true;
}

static bool trans_SB(DisasContext *s, arg_SB *a)
{
    if (!dc_isar_feature(aa64_sb, s)) {
        return false;
    }
    /*
     * TODO: There is no speculation barrier opcode for TCG;
     * MB and end the TB instead.
     */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    gen_goto_tb(s, 0, 4);
    return true;
}

static bool trans_CFINV(DisasContext *s, arg_CFINV *a)
{
    if (!dc_isar_feature(aa64_condm_4, s)) {
        return false;
    }
    tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
    return true;
}

static bool trans_XAFLAG(DisasContext *s, arg_XAFLAG *a)
{
    TCGv_i32 z;

    if (!dc_isar_feature(aa64_condm_5, s)) {
        return false;
    }

    z = tcg_temp_new_i32();

    tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);

    /*
     * (!C & !Z) << 31
     * (!(C | Z)) << 31
     * ~((C | Z) << 31)
     * ~-(C | Z)
     * (C | Z) - 1
     */
    tcg_gen_or_i32(cpu_NF, cpu_CF, z);
    tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);

    /* !(Z & C) */
    tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
    tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);

    /* (!C & Z) << 31 -> -(Z & ~C) */
    tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);

    /* C | Z */
    tcg_gen_or_i32(cpu_CF, cpu_CF, z);

    return true;
}

static bool trans_AXFLAG(DisasContext *s, arg_AXFLAG *a)
{
    if (!dc_isar_feature(aa64_condm_5, s)) {
        return false;
    }

    tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
    tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */

    /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
    tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);

    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);

    return true;
}

static bool trans_MSR_i_UAO(DisasContext *s, arg_i *a)
{
    if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
        return false;
    }
    if (a->imm & 1) {
        set_pstate_bits(PSTATE_UAO);
    } else {
        clear_pstate_bits(PSTATE_UAO);
    }
    gen_rebuild_hflags(s);
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_PAN(DisasContext *s, arg_i *a)
{
    if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
        return false;
    }
    if (a->imm & 1) {
        set_pstate_bits(PSTATE_PAN);
    } else {
        clear_pstate_bits(PSTATE_PAN);
    }
    gen_rebuild_hflags(s);
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_SPSEL(DisasContext *s, arg_i *a)
{
    if (s->current_el == 0) {
        return false;
    }
    gen_helper_msr_i_spsel(tcg_env, tcg_constant_i32(a->imm & PSTATE_SP));
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_SBSS(DisasContext *s, arg_i *a)
{
    if (!dc_isar_feature(aa64_ssbs, s)) {
        return false;
    }
    if (a->imm & 1) {
        set_pstate_bits(PSTATE_SSBS);
    } else {
        clear_pstate_bits(PSTATE_SSBS);
    }
    /* Don't need to rebuild hflags since SSBS is a nop */
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_DIT(DisasContext *s, arg_i *a)
{
    if (!dc_isar_feature(aa64_dit, s)) {
        return false;
    }
    if (a->imm & 1) {
        set_pstate_bits(PSTATE_DIT);
    } else {
        clear_pstate_bits(PSTATE_DIT);
    }
    /* There's no need to rebuild hflags because DIT is a nop */
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_TCO(DisasContext *s, arg_i *a)
{
    if (dc_isar_feature(aa64_mte, s)) {
        /* Full MTE is enabled -- set the TCO bit as directed. */
        if (a->imm & 1) {
            set_pstate_bits(PSTATE_TCO);
        } else {
            clear_pstate_bits(PSTATE_TCO);
        }
        gen_rebuild_hflags(s);
        /* Many factors, including TCO, go into MTE_ACTIVE. */
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        return true;
    } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
        /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */
        return true;
    } else {
        /* Insn not present */
        return false;
    }
}

static bool trans_MSR_i_DAIFSET(DisasContext *s, arg_i *a)
{
    gen_helper_msr_i_daifset(tcg_env, tcg_constant_i32(a->imm));
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_DAIFCLEAR(DisasContext *s, arg_i *a)
{
    gen_helper_msr_i_daifclear(tcg_env, tcg_constant_i32(a->imm));
    /* Exit the cpu loop to re-evaluate pending IRQs. */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}

static bool trans_MSR_i_SVCR(DisasContext *s, arg_MSR_i_SVCR *a)
{
    if (!dc_isar_feature(aa64_sme, s) || a->mask == 0) {
        return false;
    }
    if (sme_access_check(s)) {
        int old = s->pstate_sm | (s->pstate_za << 1);
        int new = a->imm * 3;

        if ((old ^ new) & a->mask) {
            /* At least one bit changes. */
            gen_helper_set_svcr(tcg_env, tcg_constant_i32(new),
                                tcg_constant_i32(a->mask));
            s->base.is_jmp = DISAS_TOO_MANY;
        }
    }
    return true;
}

static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);
}

static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
}
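/*
 * The shifts above restore each flag to the position QEMU keeps it in:
 * C becomes a plain 0/1 value in cpu_CF (bit 29 shifted right), and V,
 * taken from bit 28, is shifted left by 3 so that it sits in bit 31 of
 * cpu_VF, mirroring the packing done in gen_get_nzcv above.
 */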
2129 static void gen_sysreg_undef(DisasContext
*s
, bool isread
,
2130 uint8_t op0
, uint8_t op1
, uint8_t op2
,
2131 uint8_t crn
, uint8_t crm
, uint8_t rt
)
2134 * Generate code to emit an UNDEF with correct syndrome
2135 * information for a failed system register access.
2136 * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
2137 * but if FEAT_IDST is implemented then read accesses to registers
2138 * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
2143 if (isread
&& dc_isar_feature(aa64_ids
, s
) &&
2144 arm_cpreg_encoding_in_idspace(op0
, op1
, op2
, crn
, crm
)) {
2145 syndrome
= syn_aa64_sysregtrap(op0
, op1
, op2
, crn
, crm
, rt
, isread
);
2147 syndrome
= syn_uncategorized();
2149 gen_exception_insn(s
, 0, EXCP_UDEF
, syndrome
);

/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                      crn, crm, op0, op1, op2);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
    bool need_exit_tb = false;
    TCGv_ptr tcg_ri = NULL;
    TCGv_i64 tcg_rt;
    uint32_t syndrome;

    if (crn == 11 || crn == 15) {
        /*
         * Check for TIDCP trap, which must take precedence over
         * the UNDEF for "no such register" etc.
         */
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        switch (s->current_el) {
        case 0:
            if (dc_isar_feature(aa64_tidcp1, s)) {
                gen_helper_tidcp_el0(tcg_env, tcg_constant_i32(syndrome));
            }
            break;
        case 1:
            gen_helper_tidcp_el1(tcg_env, tcg_constant_i32(syndrome));
            break;
        }
    }

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    if (ri->accessfn || (ri->fgt && s->fgt_active)) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        gen_a64_update_pc(s, 0);
        tcg_ri = tcg_temp_new_ptr();
        gen_helper_access_check_cp_reg(tcg_ri, tcg_env,
                                       tcg_constant_i32(key),
                                       tcg_constant_i32(syndrome),
                                       tcg_constant_i32(isread));
    } else if (ri->type & ARM_CP_RAISES_EXC) {
        /*
         * The readfn or writefn might raise an exception;
         * synchronize the CPU state in case it does.
         */
        gen_a64_update_pc(s, 0);
    }

    /* Handle special cases first */
    switch (ri->type & ARM_CP_SPECIAL_MASK) {
    case 0:
        break;
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        if (s->mte_active[0]) {
            int desc = 0;

            desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
            desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
            desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);

            tcg_rt = tcg_temp_new_i64();
            gen_helper_mte_check_zva(tcg_rt, tcg_env,
                                     tcg_constant_i32(desc), cpu_reg(s, rt));
        } else {
            tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
        }
        gen_helper_dc_zva(tcg_env, tcg_rt);
        return;
    case ARM_CP_DC_GVA:
        {
            TCGv_i64 clean_addr, tag;

            /*
             * DC_GVA, like DC_ZVA, requires that we supply the original
             * pointer for an invalid page.  Probe that address first.
             */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);

            if (s->ata[0]) {
                /* Extract the tag from the register to match STZGM. */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(tcg_env, clean_addr, tag);
            }
        }
        return;
    case ARM_CP_DC_GZVA:
        {
            TCGv_i64 clean_addr, tag;

            /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_helper_dc_zva(tcg_env, clean_addr);

            if (s->ata[0]) {
                /* Extract the tag from the register to match STZGM. */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(tcg_env, clean_addr, tag);
            }
        }
        return;
    default:
        g_assert_not_reached();
    }

    if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
        return;
    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
        return;
    }

    if (ri->type & ARM_CP_IO) {
        /* I/O operations must end the TB here (whether read or write) */
        need_exit_tb = translator_io_start(&s->base);
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            if (!tcg_ri) {
                tcg_ri = gen_lookup_cp_reg(key);
            }
            gen_helper_get_cp_reg64(tcg_rt, tcg_env, tcg_ri);
        } else {
            tcg_gen_ld_i64(tcg_rt, tcg_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            if (!tcg_ri) {
                tcg_ri = gen_lookup_cp_reg(key);
            }
            gen_helper_set_cp_reg64(tcg_env, tcg_ri, tcg_rt);
        } else {
            tcg_gen_st_i64(tcg_rt, tcg_env, ri->fieldoffset);
        }
    }

    if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /*
         * A write to any coprocessor register that ends a TB
         * must rebuild the hflags for the next TB.
         */
        gen_rebuild_hflags(s);
        /*
         * We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        need_exit_tb = true;
    }
    if (need_exit_tb) {
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
}

static bool trans_SYS(DisasContext *s, arg_SYS *a)
{
    handle_sys(s, a->l, a->op0, a->op1, a->op2, a->crn, a->crm, a->rt);
    return true;
}

static bool trans_SVC(DisasContext *s, arg_i *a)
{
    /*
     * For SVC, HVC and SMC we advance the single-step state
     * machine before taking the exception.  This is architecturally
     * mandated, to ensure that single-stepping a system call
     * instruction works properly.
     */
    uint32_t syndrome = syn_aa64_svc(a->imm);
    if (s->fgt_svc) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
        return true;
    }
    gen_ss_advance(s);
    gen_exception_insn(s, 4, EXCP_SWI, syndrome);
    return true;
}

static bool trans_HVC(DisasContext *s, arg_i *a)
{
    if (s->current_el == 0) {
        unallocated_encoding(s);
        return true;
    }
    /*
     * The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration.
     */
    gen_a64_update_pc(s, 0);
    gen_helper_pre_hvc(tcg_env);
    /* Architecture requires ss advance before we do the actual work */
    gen_ss_advance(s);
    gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), 2);
    return true;
}

static bool trans_SMC(DisasContext *s, arg_i *a)
{
    if (s->current_el == 0) {
        unallocated_encoding(s);
        return true;
    }
    gen_a64_update_pc(s, 0);
    gen_helper_pre_smc(tcg_env, tcg_constant_i32(syn_aa64_smc(a->imm)));
    /* Architecture requires ss advance before we do the actual work */
    gen_ss_advance(s);
    gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(a->imm), 3);
    return true;
}

static bool trans_BRK(DisasContext *s, arg_i *a)
{
    gen_exception_bkpt_insn(s, syn_aa64_bkpt(a->imm));
    return true;
}

static bool trans_HLT(DisasContext *s, arg_i *a)
{
    /*
     * HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
     */
    if (semihosting_enabled(s->current_el == 0) && a->imm == 0xf000) {
        gen_exception_internal_insn(s, EXCP_SEMIHOST);
    } else {
        unallocated_encoding(s);
    }
    return true;
}

/*
 * Load/Store exclusive instructions are implemented by remembering
 * the value/address loaded, and seeing if these are the same
 * when the store is performed.  This is not actually the architecturally
 * mandated semantics, but it works for typical guest code sequences
 * and avoids having to monitor regular stores.
 *
 * The store exclusive uses the atomic cmpxchg primitives to avoid
 * races in multi-threaded linux-user and when MTTCG softmmu is
 * enabled.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn,
                               int size, bool is_pair)
{
    int idx = get_mem_index(s);
    TCGv_i64 dirty_addr, clean_addr;
    MemOp memop = check_atomic_align(s, rn, size + is_pair);

    s->is_ldex = true;
    dirty_addr = cpu_reg_sp(s, rn);
    clean_addr = gen_mte_check1(s, dirty_addr, false, rn != 31, memop);

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            TCGv_i128 t16 = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t16, clean_addr, idx, memop);

            if (s->be_data == MO_LE) {
                tcg_gen_extr_i128_i64(cpu_exclusive_val,
                                      cpu_exclusive_high, t16);
            } else {
                tcg_gen_extr_i128_i64(cpu_exclusive_high,
                                      cpu_exclusive_val, t16);
            }
            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_exclusive_addr, clean_addr);
}
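
/*
 * As an illustration of the scheme described above (hypothetical guest
 * code, not part of this file), a typical atomic-increment loop:
 *
 *   retry:
 *     ldxr  x0, [x1]        // records address and value in the monitor
 *     add   x0, x0, #1
 *     stxr  w2, x0, [x1]    // becomes a cmpxchg against the recorded value
 *     cbnz  w2, retry
 *
 * The STXR succeeds only if memory still holds the value the LDXR saw,
 * which is how the cmpxchg stands in for a true exclusive monitor.
 */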

static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                int rn, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp, clean_addr;
    MemOp memop;

    /*
     * FIXME: We are out of spec here.  We have recorded only the address
     * from load_exclusive, not the entire range, and we assume that the
     * size of the access on both sides match.  The architecture allows the
     * store to be smaller than the load, so long as the stored bytes are
     * within the range recorded by the load.
     */

    /* See AArch64.ExclusiveMonitorsPass() and AArch64.IsExclusiveVA(). */
    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
    tcg_gen_brcond_i64(TCG_COND_NE, clean_addr, cpu_exclusive_addr, fail_label);

    /*
     * The write, and any associated faults, only happen if the virtual
     * and physical addresses pass the exclusive monitor check.  These
     * faults are exceedingly unlikely, because normally the guest uses
     * the exact same address register for the load_exclusive, and we
     * would have recognized these faults there.
     *
     * It is possible to trigger an alignment fault pre-LSE2, e.g. with an
     * unaligned 4-byte write within the range of an aligned 8-byte load.
     * With LSE2, the store would need to cross a 16-byte boundary when the
     * load did not, which would mean the store is outside the range
     * recorded for the monitor, which would have failed a corrected monitor
     * check above.  For now, we assume no size change and retain the
     * MO_ALIGN to let tcg know what we checked in the load_exclusive.
     *
     * It is possible to trigger an MTE fault, by performing the load with
     * a virtual address with a valid tag and performing the store with the
     * same virtual address and a different invalid tag.
     */
    memop = size + is_pair;
    if (memop == MO_128 || !dc_isar_feature(aa64_lse2, s)) {
        memop |= MO_ALIGN;
    }
    memop = finalize_memop(s, memop);
    gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s), memop);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else {
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv_i128 c16 = tcg_temp_new_i128();
            TCGv_i64 a, b;

            if (s->be_data == MO_LE) {
                tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt), cpu_reg(s, rt2));
                tcg_gen_concat_i64_i128(c16, cpu_exclusive_val,
                                        cpu_exclusive_high);
            } else {
                tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt2), cpu_reg(s, rt));
                tcg_gen_concat_i64_i128(c16, cpu_exclusive_high,
                                        cpu_exclusive_val);
            }

            tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16,
                                        get_mem_index(s), memop);

            a = tcg_temp_new_i64();
            b = tcg_temp_new_i64();
            if (s->be_data == MO_LE) {
                tcg_gen_extr_i128_i64(a, b, t16);
            } else {
                tcg_gen_extr_i128_i64(b, a, t16);
            }

            tcg_gen_xor_i64(a, a, cpu_exclusive_val);
            tcg_gen_xor_i64(b, b, cpu_exclusive_high);
            tcg_gen_or_i64(tmp, a, b);

            tcg_gen_setcondi_i64(TCG_COND_NE, tmp, tmp, 0);
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s), memop);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 clean_addr;
    MemOp memop;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    memop = check_atomic_align(s, rn, size);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt,
                               memidx, memop);
}
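
/*
 * For example (illustrative): CAS x0, x1, [x2] compares [x2] with x0 and,
 * if equal, stores x1; either way the old memory value lands back in x0,
 * which is exactly the tcg_gen_atomic_cmpxchg_i64 contract used above.
 */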

static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 clean_addr;
    int memidx = get_mem_index(s);
    MemOp memop;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* This is a single atomic access, despite the "pair". */
    memop = check_atomic_align(s, rn, size + 1);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);

    if (size == 2) {
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx, memop);

        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
    } else {
        TCGv_i128 cmp = tcg_temp_new_i128();
        TCGv_i128 val = tcg_temp_new_i128();

        if (s->be_data == MO_LE) {
            tcg_gen_concat_i64_i128(val, t1, t2);
            tcg_gen_concat_i64_i128(cmp, s1, s2);
        } else {
            tcg_gen_concat_i64_i128(val, t2, t1);
            tcg_gen_concat_i64_i128(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx, memop);

        if (s->be_data == MO_LE) {
            tcg_gen_extr_i128_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr_i128_i64(s2, s1, cmp);
        }
    }
}

/*
 * Compute the ISS.SF bit for syndrome information if an exception
 * is taken on a load or store.  This indicates whether the instruction
 * is accessing a 32-bit or 64-bit register.  This logic is derived
 * from the ARMv8 specs for LDR (Shared decode for all encodings).
 */
static bool ldst_iss_sf(int size, bool sign, bool ext)
{
    if (sign) {
        /*
         * Signed loads are 64 bit results if we are not going to
         * do a zero-extend from 32 to 64 after the load.
         * (For a store, sign and ext are always false.)
         */
        return !ext;
    } else {
        /* Unsigned loads/stores work at the specified size */
        return size == MO_64;
    }
}
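
/*
 * A few concrete cases of the rule above (illustrative): LDR Xt gives
 * SF=1 (size == MO_64); LDR Wt gives SF=0; LDRSW Xt (sign=1, ext=0)
 * gives SF=1 because the full 64-bit register is written; LDRSH Wt
 * (sign=1, ext=1) gives SF=0 because of the zero-extend to 32 bits.
 */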

static bool trans_STXR(DisasContext *s, arg_stxr *a)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    if (a->lasr) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }
    gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, false);
    return true;
}

static bool trans_LDXR(DisasContext *s, arg_stxr *a)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, false);
    if (a->lasr) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
    return true;
}

static bool trans_STLR(DisasContext *s, arg_stlr *a)
{
    TCGv_i64 clean_addr;
    MemOp memop;
    bool iss_sf = ldst_iss_sf(a->sz, false, false);

    /*
     * StoreLORelease is the same as Store-Release for QEMU, but
     * needs the feature-test.
     */
    if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
        return false;
    }
    /* Generate ISS for non-exclusive accesses including LASR. */
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    memop = check_ordered_align(s, a->rn, 0, true, a->sz);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
                                true, a->rn != 31, memop);
    do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, memop, true, a->rt,
              iss_sf, a->lasr);
    return true;
}

static bool trans_LDAR(DisasContext *s, arg_stlr *a)
{
    TCGv_i64 clean_addr;
    MemOp memop;
    bool iss_sf = ldst_iss_sf(a->sz, false, false);

    /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
    if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
        return false;
    }
    /* Generate ISS for non-exclusive accesses including LASR. */
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    memop = check_ordered_align(s, a->rn, 0, false, a->sz);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
                                false, a->rn != 31, memop);
    do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, memop, false, true,
              a->rt, iss_sf, a->lasr);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    return true;
}

static bool trans_STXP(DisasContext *s, arg_stxr *a)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    if (a->lasr) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }
    gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, true);
    return true;
}

static bool trans_LDXP(DisasContext *s, arg_stxr *a)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, true);
    if (a->lasr) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
    return true;
}

static bool trans_CASP(DisasContext *s, arg_CASP *a)
{
    if (!dc_isar_feature(aa64_atomics, s)) {
        return false;
    }
    if (((a->rt | a->rs) & 1) != 0) {
        return false;
    }

    gen_compare_and_swap_pair(s, a->rs, a->rt, a->rn, a->sz);
    return true;
}

static bool trans_CAS(DisasContext *s, arg_CAS *a)
{
    if (!dc_isar_feature(aa64_atomics, s)) {
        return false;
    }
    gen_compare_and_swap(s, a->rs, a->rt, a->rn, a->sz);
    return true;
}

static bool trans_LD_lit(DisasContext *s, arg_ldlit *a)
{
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, false);
    TCGv_i64 tcg_rt = cpu_reg(s, a->rt);
    TCGv_i64 clean_addr = tcg_temp_new_i64();
    MemOp memop = finalize_memop(s, a->sz + a->sign * MO_SIGN);

    gen_pc_plus_diff(s, clean_addr, a->imm);
    do_gpr_ld(s, tcg_rt, clean_addr, memop,
              false, true, a->rt, iss_sf, false);
    return true;
}

static bool trans_LD_lit_v(DisasContext *s, arg_ldlit *a)
{
    /* Load register (literal), vector version */
    TCGv_i64 clean_addr;
    MemOp memop;

    if (!fp_access_check(s)) {
        return true;
    }
    memop = finalize_memop_asimd(s, a->sz);
    clean_addr = tcg_temp_new_i64();
    gen_pc_plus_diff(s, clean_addr, a->imm);
    do_fp_ld(s, a->rt, clean_addr, memop);
    return true;
}

static void op_addr_ldstpair_pre(DisasContext *s, arg_ldstpair *a,
                                 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
                                 uint64_t offset, bool is_store, MemOp mop)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    if (!a->p) {
        tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
    }

    *clean_addr = gen_mte_checkN(s, *dirty_addr, is_store,
                                 (a->w || a->rn != 31), 2 << a->sz, mop);
}

static void op_addr_ldstpair_post(DisasContext *s, arg_ldstpair *a,
                                  TCGv_i64 dirty_addr, uint64_t offset)
{
    if (a->w) {
        if (a->p) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
    }
}

static bool trans_STP(DisasContext *s, arg_ldstpair *a)
{
    uint64_t offset = a->imm << a->sz;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
    MemOp mop = finalize_memop(s, a->sz);

    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, true, mop);
    tcg_rt = cpu_reg(s, a->rt);
    tcg_rt2 = cpu_reg(s, a->rt2);
    /*
     * We built mop above for the single logical access -- rebuild it
     * now for the paired operation.
     *
     * With LSE2, non-sign-extending pairs are treated atomically if
     * aligned, and if unaligned one of the pair will be completely
     * within a 16-byte block and that element will be atomic.
     * Otherwise each element is separately atomic.
     * In all cases, issue one operation with the correct atomicity.
     */
    mop = a->sz + 1;
    if (s->align_mem) {
        mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
    }
    mop = finalize_memop_pair(s, mop);
    if (a->sz == 2) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(tmp, tcg_rt, tcg_rt2);
        } else {
            tcg_gen_concat32_i64(tmp, tcg_rt2, tcg_rt);
        }
        tcg_gen_qemu_st_i64(tmp, clean_addr, get_mem_index(s), mop);
    } else {
        TCGv_i128 tmp = tcg_temp_new_i128();

        if (s->be_data == MO_LE) {
            tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2);
        } else {
            tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt);
        }
        tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);
    }
    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}

static bool trans_LDP(DisasContext *s, arg_ldstpair *a)
{
    uint64_t offset = a->imm << a->sz;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
    MemOp mop = finalize_memop(s, a->sz);

    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop);
    tcg_rt = cpu_reg(s, a->rt);
    tcg_rt2 = cpu_reg(s, a->rt2);

    /*
     * We built mop above for the single logical access -- rebuild it
     * now for the paired operation.
     *
     * With LSE2, non-sign-extending pairs are treated atomically if
     * aligned, and if unaligned one of the pair will be completely
     * within a 16-byte block and that element will be atomic.
     * Otherwise each element is separately atomic.
     * In all cases, issue one operation with the correct atomicity.
     *
     * This treats sign-extending loads like zero-extending loads,
     * since that reuses the most code below.
     */
    mop = a->sz + 1;
    if (s->align_mem) {
        mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
    }
    mop = finalize_memop_pair(s, mop);
    if (a->sz == 2) {
        int o2 = s->be_data == MO_LE ? 32 : 0;
        int o1 = o2 ^ 32;

        tcg_gen_qemu_ld_i64(tcg_rt, clean_addr, get_mem_index(s), mop);
        if (a->sign) {
            tcg_gen_sextract_i64(tcg_rt2, tcg_rt, o2, 32);
            tcg_gen_sextract_i64(tcg_rt, tcg_rt, o1, 32);
        } else {
            tcg_gen_extract_i64(tcg_rt2, tcg_rt, o2, 32);
            tcg_gen_extract_i64(tcg_rt, tcg_rt, o1, 32);
        }
    } else {
        TCGv_i128 tmp = tcg_temp_new_i128();

        tcg_gen_qemu_ld_i128(tmp, clean_addr, get_mem_index(s), mop);
        if (s->be_data == MO_LE) {
            tcg_gen_extr_i128_i64(tcg_rt, tcg_rt2, tmp);
        } else {
            tcg_gen_extr_i128_i64(tcg_rt2, tcg_rt, tmp);
        }
    }
    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}
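
/*
 * Worked example (illustrative): little-endian LDP w0, w1, [x2] takes the
 * a->sz == 2 path as one 8-byte load with o2 == 32, o1 == 0, so w0 is
 * extracted from bits [31:0] and w1 from bits [63:32] of the doubleword.
 */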

static bool trans_STP_v(DisasContext *s, arg_ldstpair *a)
{
    uint64_t offset = a->imm << a->sz;
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop;

    if (!fp_access_check(s)) {
        return true;
    }

    /* LSE2 does not merge FP pairs; leave these as separate operations. */
    mop = finalize_memop_asimd(s, a->sz);
    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, true, mop);
    do_fp_st(s, a->rt, clean_addr, mop);
    tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz);
    do_fp_st(s, a->rt2, clean_addr, mop);
    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}

static bool trans_LDP_v(DisasContext *s, arg_ldstpair *a)
{
    uint64_t offset = a->imm << a->sz;
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop;

    if (!fp_access_check(s)) {
        return true;
    }

    /* LSE2 does not merge FP pairs; leave these as separate operations. */
    mop = finalize_memop_asimd(s, a->sz);
    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop);
    do_fp_ld(s, a->rt, clean_addr, mop);
    tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz);
    do_fp_ld(s, a->rt2, clean_addr, mop);
    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}

static bool trans_STGP(DisasContext *s, arg_ldstpair *a)
{
    TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
    uint64_t offset = a->imm << LOG2_TAG_GRANULE;
    MemOp mop;
    TCGv_i128 tmp;

    /* STGP only comes in one size. */
    tcg_debug_assert(a->sz == MO_64);

    if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    if (!a->p) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    }

    clean_addr = clean_data_tbi(s, dirty_addr);
    tcg_rt = cpu_reg(s, a->rt);
    tcg_rt2 = cpu_reg(s, a->rt2);

    /*
     * STGP is defined as two 8-byte memory operations, aligned to
     * TAG_GRANULE, and one tag operation.  We implement it as one single
     * aligned 16-byte memory operation for convenience.  Note that the
     * alignment ensures MO_ATOM_IFALIGN_PAIR produces 8-byte atomicity
     * for the memory store.
     */
    mop = finalize_memop_atom(s, MO_128 | MO_ALIGN, MO_ATOM_IFALIGN_PAIR);

    tmp = tcg_temp_new_i128();
    if (s->be_data == MO_LE) {
        tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2);
    } else {
        tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt);
    }
    tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);

    /* Perform the tag store, if tag access enabled. */
    if (s->ata[0]) {
        if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            gen_helper_stg_parallel(tcg_env, dirty_addr, dirty_addr);
        } else {
            gen_helper_stg(tcg_env, dirty_addr, dirty_addr);
        }
    }

    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}

static void op_addr_ldst_imm_pre(DisasContext *s, arg_ldst_imm *a,
                                 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
                                 uint64_t offset, bool is_store, MemOp mop)
{
    int memidx;

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    if (!a->p) {
        tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
    }
    memidx = get_a64_user_mem_index(s, a->unpriv);
    *clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store,
                                        a->w || a->rn != 31,
                                        mop, a->unpriv, memidx);
}

static void op_addr_ldst_imm_post(DisasContext *s, arg_ldst_imm *a,
                                  TCGv_i64 dirty_addr, uint64_t offset)
{
    if (a->w) {
        if (a->p) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
    }
}

static bool trans_STR_i(DisasContext *s, arg_ldst_imm *a)
{
    bool iss_sf, iss_valid = !a->w;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    int memidx = get_a64_user_mem_index(s, a->unpriv);
    MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);

    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);

    tcg_rt = cpu_reg(s, a->rt);
    iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);

    do_gpr_st_memidx(s, tcg_rt, clean_addr, mop, memidx,
                     iss_valid, a->rt, iss_sf, false);
    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
    return true;
}

static bool trans_LDR_i(DisasContext *s, arg_ldst_imm *a)
{
    bool iss_sf, iss_valid = !a->w;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    int memidx = get_a64_user_mem_index(s, a->unpriv);
    MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);

    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);

    tcg_rt = cpu_reg(s, a->rt);
    iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);

    do_gpr_ld_memidx(s, tcg_rt, clean_addr, mop,
                     a->ext, memidx, iss_valid, a->rt, iss_sf, false);
    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
    return true;
}

static bool trans_STR_v_i(DisasContext *s, arg_ldst_imm *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop;

    if (!fp_access_check(s)) {
        return true;
    }
    mop = finalize_memop_asimd(s, a->sz);
    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);
    do_fp_st(s, a->rt, clean_addr, mop);
    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
    return true;
}

static bool trans_LDR_v_i(DisasContext *s, arg_ldst_imm *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop;

    if (!fp_access_check(s)) {
        return true;
    }
    mop = finalize_memop_asimd(s, a->sz);
    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);
    do_fp_ld(s, a->rt, clean_addr, mop);
    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
    return true;
}

static void op_addr_ldst_pre(DisasContext *s, arg_ldst *a,
                             TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
                             bool is_store, MemOp memop)
{
    TCGv_i64 tcg_rm;

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);

    tcg_rm = read_cpu_reg(s, a->rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, a->opt, a->s ? a->sz : 0);

    tcg_gen_add_i64(*dirty_addr, *dirty_addr, tcg_rm);
    *clean_addr = gen_mte_check1(s, *dirty_addr, is_store, true, memop);
}

static bool trans_LDR(DisasContext *s, arg_ldst *a)
{
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
    MemOp memop;

    if (extract32(a->opt, 1, 1) == 0) {
        return false;
    }

    memop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
    op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop);
    tcg_rt = cpu_reg(s, a->rt);
    do_gpr_ld(s, tcg_rt, clean_addr, memop,
              a->ext, true, a->rt, iss_sf, false);
    return true;
}

static bool trans_STR(DisasContext *s, arg_ldst *a)
{
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
    MemOp memop;

    if (extract32(a->opt, 1, 1) == 0) {
        return false;
    }

    memop = finalize_memop(s, a->sz);
    op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop);
    tcg_rt = cpu_reg(s, a->rt);
    do_gpr_st(s, tcg_rt, clean_addr, memop, true, a->rt, iss_sf, false);
    return true;
}

static bool trans_LDR_v(DisasContext *s, arg_ldst *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp memop;

    if (extract32(a->opt, 1, 1) == 0) {
        return false;
    }

    if (!fp_access_check(s)) {
        return true;
    }

    memop = finalize_memop_asimd(s, a->sz);
    op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop);
    do_fp_ld(s, a->rt, clean_addr, memop);
    return true;
}

static bool trans_STR_v(DisasContext *s, arg_ldst *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp memop;

    if (extract32(a->opt, 1, 1) == 0) {
        return false;
    }

    if (!fp_access_check(s)) {
        return true;
    }

    memop = finalize_memop_asimd(s, a->sz);
    op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop);
    do_fp_st(s, a->rt, clean_addr, memop);
    return true;
}

static bool do_atomic_ld(DisasContext *s, arg_atomic *a, AtomicThreeOpFn *fn,
                         int sign, bool invert)
{
    MemOp mop = a->sz | sign;
    TCGv_i64 clean_addr, tcg_rs, tcg_rt;

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    mop = check_atomic_align(s, a->rn, mop);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false,
                                a->rn != 31, mop);
    tcg_rs = read_cpu_reg(s, a->rs, true);
    tcg_rt = cpu_reg(s, a->rt);
    if (invert) {
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }
    /*
     * The tcg atomic primitives are all full barriers.  Therefore we
     * can ignore the Acquire and Release bits of this instruction.
     */
    fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);

    if (mop & MO_SIGN) {
        switch (a->sz) {
        case MO_8:
            tcg_gen_ext8u_i64(tcg_rt, tcg_rt);
            break;
        case MO_16:
            tcg_gen_ext16u_i64(tcg_rt, tcg_rt);
            break;
        case MO_32:
            tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
            break;
        case MO_64:
            break;
        default:
            g_assert_not_reached();
        }
    }
    return true;
}

TRANS_FEAT(LDADD, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_add_i64, 0, false)
TRANS_FEAT(LDCLR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_and_i64, 0, true)
TRANS_FEAT(LDEOR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_xor_i64, 0, false)
TRANS_FEAT(LDSET, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_or_i64, 0, false)
TRANS_FEAT(LDSMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smax_i64, MO_SIGN, false)
TRANS_FEAT(LDSMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smin_i64, MO_SIGN, false)
TRANS_FEAT(LDUMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umax_i64, 0, false)
TRANS_FEAT(LDUMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umin_i64, 0, false)
TRANS_FEAT(SWP, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_xchg_i64, 0, false)
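
/*
 * Note on the table above (illustrative): the "invert" flag makes LDCLR
 * the odd one out -- it is implemented as an atomic fetch-and with ~Rs,
 * i.e. mem <- mem & ~Rs with the old value returned in Rt, matching the
 * architectural "clear the bits that are set in Rs" semantics.
 */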

static bool trans_LDAPR(DisasContext *s, arg_LDAPR *a)
{
    bool iss_sf = ldst_iss_sf(a->sz, false, false);
    TCGv_i64 clean_addr;
    MemOp mop;

    if (!dc_isar_feature(aa64_atomics, s) ||
        !dc_isar_feature(aa64_rcpc_8_3, s)) {
        return false;
    }
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    mop = check_atomic_align(s, a->rn, a->sz);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false,
                                a->rn != 31, mop);
    /*
     * LDAPR* are a special case because they are a simple load, not a
     * fetch-and-do-something op.
     * The architectural consistency requirements here are weaker than
     * full load-acquire (we only need "load-acquire processor consistent"),
     * but we choose to implement them as full LDAQ.
     */
    do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, false,
              true, a->rt, iss_sf, true);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    return true;
}

static bool trans_LDRA(DisasContext *s, arg_LDRA *a)
{
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    MemOp memop;

    /* Load with pointer authentication */
    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);

    if (s->pauth_active) {
        if (!a->m) {
            gen_helper_autda_combined(dirty_addr, tcg_env, dirty_addr,
                                      tcg_constant_i64(0));
        } else {
            gen_helper_autdb_combined(dirty_addr, tcg_env, dirty_addr,
                                      tcg_constant_i64(0));
        }
    }

    tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);

    memop = finalize_memop(s, MO_64);

    /* Note that "clean" and "dirty" here refer to TBI not PAC. */
    clean_addr = gen_mte_check1(s, dirty_addr, false,
                                a->w || a->rn != 31, memop);

    tcg_rt = cpu_reg(s, a->rt);
    do_gpr_ld(s, tcg_rt, clean_addr, memop,
              /* extend */ false, /* iss_valid */ !a->w,
              /* iss_srt */ a->rt, /* iss_sf */ true, /* iss_ar */ false);

    if (a->w) {
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
    }
    return true;
}

static bool trans_LDAPR_i(DisasContext *s, arg_ldapr_stlr_i *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop = a->sz | (a->sign ? MO_SIGN : 0);
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    mop = check_ordered_align(s, a->rn, a->imm, false, mop);
    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
    clean_addr = clean_data_tbi(s, dirty_addr);

    /*
     * Load-AcquirePC semantics; we implement as the slightly more
     * restrictive Load-Acquire.
     */
    do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, a->ext, true,
              a->rt, iss_sf, true);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    return true;
}

static bool trans_STLR_i(DisasContext *s, arg_ldapr_stlr_i *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop = a->sz;
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        return false;
    }

    /* TODO: ARMv8.4-LSE SCTLR.nAA */

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    mop = check_ordered_align(s, a->rn, a->imm, true, mop);
    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
    clean_addr = clean_data_tbi(s, dirty_addr);

    /* Store-Release semantics */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, mop, true, a->rt, iss_sf, true);
    return true;
}

static bool trans_LD_mult(DisasContext *s, arg_ldst_mult *a)
{
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian, align, mop;

    int total;    /* total bytes */
    int elements; /* elements per vector */
    int r;
    int size = a->sz;

    if (!a->p && a->rm != 0) {
        /* For non-postindexed accesses the Rm field must be 0 */
        return false;
    }
    if (size == 3 && !a->q && a->selem != 1) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian. */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    total = a->rpt * a->selem * (a->q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, a->rn);

    /*
     * Issue the MTE check vs the logical repeat count, before we
     * promote consecutive little-endian elements below.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31, total,
                                finalize_memop_asimd(s, size));

    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    align = MO_ALIGN;
    if (a->selem == 1 && endian == MO_LE) {
        align = pow2_align(size);
        size = 3;
    }
    if (!s->align_mem) {
        align = 0;
    }
    mop = endian | size | align;

    elements = (a->q ? 16 : 8) >> size;
    tcg_ebytes = tcg_constant_i64(1 << size);
    for (r = 0; r < a->rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < a->selem; xs++) {
                int tt = (a->rt + r + xs) % 32;
                do_vec_ld(s, tt, e, clean_addr, mop);
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }

    /*
     * For non-quad operations, setting a slice of the low 64 bits of
     * the register clears the high 64 bits (in the ARM ARM pseudocode
     * this is implicit in the fact that 'rval' is a 64 bit wide
     * variable).  For quad operations, we might still need to zero
     * the high bits of SVE.
     */
    for (r = 0; r < a->rpt * a->selem; r++) {
        int tt = (a->rt + r) % 32;
        clear_vec_high(s, a->q, tt);
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}

static bool trans_ST_mult(DisasContext *s, arg_ldst_mult *a)
{
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian, align, mop;

    int total;    /* total bytes */
    int elements; /* elements per vector */
    int r;
    int size = a->sz;

    if (!a->p && a->rm != 0) {
        /* For non-postindexed accesses the Rm field must be 0 */
        return false;
    }
    if (size == 3 && !a->q && a->selem != 1) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian. */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    total = a->rpt * a->selem * (a->q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, a->rn);

    /*
     * Issue the MTE check vs the logical repeat count, before we
     * promote consecutive little-endian elements below.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31, total,
                                finalize_memop_asimd(s, size));

    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    align = MO_ALIGN;
    if (a->selem == 1 && endian == MO_LE) {
        align = pow2_align(size);
        size = 3;
    }
    if (!s->align_mem) {
        align = 0;
    }
    mop = endian | size | align;

    elements = (a->q ? 16 : 8) >> size;
    tcg_ebytes = tcg_constant_i64(1 << size);
    for (r = 0; r < a->rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < a->selem; xs++) {
                int tt = (a->rt + r + xs) % 32;
                do_vec_st(s, tt, e, clean_addr, mop);
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}

static bool trans_ST_single(DisasContext *s, arg_ldst_single *a)
{
    int xs, total, rt;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (!a->p && a->rm != 0) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = a->selem << a->scale;
    tcg_rn = cpu_reg_sp(s, a->rn);

    mop = finalize_memop_asimd(s, a->scale);
    clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31,
                                total, mop);

    tcg_ebytes = tcg_constant_i64(1 << a->scale);
    for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
        do_vec_st(s, rt, a->index, clean_addr, mop);
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}

static bool trans_LD_single(DisasContext *s, arg_ldst_single *a)
{
    int xs, total, rt;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (!a->p && a->rm != 0) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = a->selem << a->scale;
    tcg_rn = cpu_reg_sp(s, a->rn);

    mop = finalize_memop_asimd(s, a->scale);
    clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31,
                                total, mop);

    tcg_ebytes = tcg_constant_i64(1 << a->scale);
    for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
        do_vec_ld(s, rt, a->index, clean_addr, mop);
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}

static bool trans_LD_single_repl(DisasContext *s, arg_LD_single_repl *a)
{
    int xs, total, rt;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (!a->p && a->rm != 0) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = a->selem << a->scale;
    tcg_rn = cpu_reg_sp(s, a->rn);

    mop = finalize_memop_asimd(s, a->scale);
    clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31,
                                total, mop);

    tcg_ebytes = tcg_constant_i64(1 << a->scale);
    for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
        /* Load and replicate to all elements */
        TCGv_i64 tcg_tmp = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
        tcg_gen_gvec_dup_i64(a->scale, vec_full_reg_offset(s, rt),
                             (a->q + 1) * 8, vec_full_reg_size(s), tcg_tmp);
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}

static bool trans_STZGM(DisasContext *s, arg_ldst_tag *a)
{
    TCGv_i64 addr, clean_addr, tcg_rt;
    int size = 4 << s->dcz_blocksize;

    if (!dc_isar_feature(aa64_mte, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    tcg_gen_addi_i64(addr, addr, a->imm);
    tcg_rt = cpu_reg(s, a->rt);

    if (s->ata[0]) {
        gen_helper_stzgm_tags(tcg_env, addr, tcg_rt);
    }
    /*
     * The non-tags portion of STZGM is mostly like DC_ZVA,
     * except the alignment happens before the access.
     */
    clean_addr = clean_data_tbi(s, addr);
    tcg_gen_andi_i64(clean_addr, clean_addr, -size);
    gen_helper_dc_zva(tcg_env, clean_addr);
    return true;
}

static bool trans_STGM(DisasContext *s, arg_ldst_tag *a)
{
    TCGv_i64 addr, clean_addr, tcg_rt;

    if (!dc_isar_feature(aa64_mte, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    tcg_gen_addi_i64(addr, addr, a->imm);
    tcg_rt = cpu_reg(s, a->rt);

    if (s->ata[0]) {
        gen_helper_stgm(tcg_env, addr, tcg_rt);
    } else {
        MMUAccessType acc = MMU_DATA_STORE;
        int size = 4 << s->gm_blocksize;

        clean_addr = clean_data_tbi(s, addr);
        tcg_gen_andi_i64(clean_addr, clean_addr, -size);
        gen_probe_access(s, clean_addr, acc, size);
    }
    return true;
}

static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a)
{
    TCGv_i64 addr, clean_addr, tcg_rt;

    if (!dc_isar_feature(aa64_mte, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    tcg_gen_addi_i64(addr, addr, a->imm);
    tcg_rt = cpu_reg(s, a->rt);

    if (s->ata[0]) {
        gen_helper_ldgm(tcg_rt, tcg_env, addr);
    } else {
        MMUAccessType acc = MMU_DATA_LOAD;
        int size = 4 << s->gm_blocksize;

        clean_addr = clean_data_tbi(s, addr);
        tcg_gen_andi_i64(clean_addr, clean_addr, -size);
        gen_probe_access(s, clean_addr, acc, size);
        /* The result tags are zeros. */
        tcg_gen_movi_i64(tcg_rt, 0);
    }
    return true;
}

static bool trans_LDG(DisasContext *s, arg_ldst_tag *a)
{
    TCGv_i64 addr, clean_addr, tcg_rt;

    if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    if (!a->p) {
        /* pre-index or signed offset */
        tcg_gen_addi_i64(addr, addr, a->imm);
    }

    tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
    tcg_rt = cpu_reg(s, a->rt);
    if (s->ata[0]) {
        gen_helper_ldg(tcg_rt, tcg_env, addr, tcg_rt);
    } else {
        /*
         * Tag access disabled: we must check for aborts on the load
         * from [rn+offset], and then insert a 0 tag into rt.
         */
        clean_addr = clean_data_tbi(s, addr);
        gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
        gen_address_with_allocation_tag0(tcg_rt, tcg_rt);
    }

    if (a->w) {
        /* pre-index or post-index */
        if (a->p) {
            /* post-index */
            tcg_gen_addi_i64(addr, addr, a->imm);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
    }
    return true;
}

static bool do_STG(DisasContext *s, arg_ldst_tag *a, bool is_zero, bool is_pair)
{
    TCGv_i64 addr, tcg_rt;

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    if (!a->p) {
        /* pre-index or signed offset */
        tcg_gen_addi_i64(addr, addr, a->imm);
    }
    tcg_rt = cpu_reg_sp(s, a->rt);
    if (!s->ata[0]) {
        /*
         * For STG and ST2G, we need to check alignment and probe memory.
         * TODO: For STZG and STZ2G, we could rely on the stores below,
         * at least for system mode; user-only won't enforce alignment.
         */
        if (is_pair) {
            gen_helper_st2g_stub(tcg_env, addr);
        } else {
            gen_helper_stg_stub(tcg_env, addr);
        }
    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        if (is_pair) {
            gen_helper_st2g_parallel(tcg_env, addr, tcg_rt);
        } else {
            gen_helper_stg_parallel(tcg_env, addr, tcg_rt);
        }
    } else {
        if (is_pair) {
            gen_helper_st2g(tcg_env, addr, tcg_rt);
        } else {
            gen_helper_stg(tcg_env, addr, tcg_rt);
        }
    }

    if (is_zero) {
        TCGv_i64 clean_addr = clean_data_tbi(s, addr);
        TCGv_i64 zero64 = tcg_constant_i64(0);
        TCGv_i128 zero128 = tcg_temp_new_i128();
        int mem_index = get_mem_index(s);
        MemOp mop = finalize_memop(s, MO_128 | MO_ALIGN);

        tcg_gen_concat_i64_i128(zero128, zero64, zero64);

        /* This is 1 or 2 atomic 16-byte operations. */
        tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
        if (is_pair) {
            tcg_gen_addi_i64(clean_addr, clean_addr, 16);
            tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
        }
    }

    if (a->w) {
        /* pre-index or post-index */
        if (a->p) {
            /* post-index */
            tcg_gen_addi_i64(addr, addr, a->imm);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
    }
    return true;
}

TRANS_FEAT(STG, aa64_mte_insn_reg, do_STG, a, false, false)
TRANS_FEAT(STZG, aa64_mte_insn_reg, do_STG, a, true, false)
TRANS_FEAT(ST2G, aa64_mte_insn_reg, do_STG, a, false, true)
TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true)

typedef void SetFn(TCGv_env, TCGv_i32, TCGv_i32);

static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue,
                   bool is_setg, SetFn fn)
{
    int memidx;
    uint32_t syndrome, desc = 0;

    if (is_setg && !dc_isar_feature(aa64_mte, s)) {
        return false;
    }

    /*
     * UNPREDICTABLE cases: we choose to UNDEF, which allows
     * us to pull this check before the CheckMOPSEnabled() test
     * (which we do in the helper function)
     */
    if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd ||
        a->rd == 31 || a->rn == 31) {
        return false;
    }

    memidx = get_a64_user_mem_index(s, a->unpriv);

    /*
     * We pass option_a == true, matching our implementation;
     * we pass wrong_option == false: helper function may set that bit.
     */
    syndrome = syn_mop(true, is_setg, (a->nontemp << 1) | a->unpriv,
                       is_epilogue, false, true, a->rd, a->rs, a->rn);

    if (is_setg ? s->ata[a->unpriv] : s->mte_active[a->unpriv]) {
        /* We may need to do MTE tag checking, so assemble the descriptor */
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, true);
        /* SIZEM1 and ALIGN we leave 0 (byte write) */
    }
    /* The helper function always needs the memidx even with MTE disabled */
    desc = FIELD_DP32(desc, MTEDESC, MIDX, memidx);

    /*
     * The helper needs the register numbers, but since they're in
     * the syndrome anyway, we let it extract them from there rather
     * than passing in an extra three integer arguments.
     */
    fn(tcg_env, tcg_constant_i32(syndrome), tcg_constant_i32(desc));
    return true;
}

TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, false, gen_helper_setp)
TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, false, gen_helper_setm)
TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, false, gen_helper_sete)
TRANS_FEAT(SETGP, aa64_mops, do_SET, a, false, true, gen_helper_setgp)
TRANS_FEAT(SETGM, aa64_mops, do_SET, a, false, true, gen_helper_setgm)
TRANS_FEAT(SETGE, aa64_mops, do_SET, a, true, true, gen_helper_setge)
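
/*
 * Usage sketch (illustrative): with FEAT_MOPS a guest memset is the
 * three-instruction sequence SETP/SETM/SETE (prologue, main, epilogue);
 * each lands in do_SET above with a different helper and is_epilogue
 * flag, with the actual work split decided at run time by the helpers.
 */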

typedef void CpyFn(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32);

static bool do_CPY(DisasContext *s, arg_cpy *a, bool is_epilogue, CpyFn fn)
{
    int rmemidx, wmemidx;
    uint32_t syndrome, rdesc = 0, wdesc = 0;
    bool wunpriv = extract32(a->options, 0, 1);
    bool runpriv = extract32(a->options, 1, 1);

    /*
     * UNPREDICTABLE cases: we choose to UNDEF, which allows
     * us to pull this check before the CheckMOPSEnabled() test
     * (which we do in the helper function)
     */
    if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd ||
        a->rd == 31 || a->rs == 31 || a->rn == 31) {
        return false;
    }

    rmemidx = get_a64_user_mem_index(s, runpriv);
    wmemidx = get_a64_user_mem_index(s, wunpriv);

    /*
     * We pass option_a == true, matching our implementation;
     * we pass wrong_option == false: helper function may set that bit.
     */
    syndrome = syn_mop(false, false, a->options, is_epilogue,
                       false, true, a->rd, a->rs, a->rn);

    /* If we need to do MTE tag checking, assemble the descriptors */
    if (s->mte_active[runpriv]) {
        rdesc = FIELD_DP32(rdesc, MTEDESC, TBI, s->tbid);
        rdesc = FIELD_DP32(rdesc, MTEDESC, TCMA, s->tcma);
    }
    if (s->mte_active[wunpriv]) {
        wdesc = FIELD_DP32(wdesc, MTEDESC, TBI, s->tbid);
        wdesc = FIELD_DP32(wdesc, MTEDESC, TCMA, s->tcma);
        wdesc = FIELD_DP32(wdesc, MTEDESC, WRITE, true);
    }
    /* The helper function needs these parts of the descriptor regardless */
    rdesc = FIELD_DP32(rdesc, MTEDESC, MIDX, rmemidx);
    wdesc = FIELD_DP32(wdesc, MTEDESC, MIDX, wmemidx);

    /*
     * The helper needs the register numbers, but since they're in
     * the syndrome anyway, we let it extract them from there rather
     * than passing in an extra three integer arguments.
     */
    fn(tcg_env, tcg_constant_i32(syndrome), tcg_constant_i32(wdesc),
       tcg_constant_i32(rdesc));
    return true;
}

TRANS_FEAT(CPYP, aa64_mops, do_CPY, a, false, gen_helper_cpyp)
TRANS_FEAT(CPYM, aa64_mops, do_CPY, a, false, gen_helper_cpym)
TRANS_FEAT(CPYE, aa64_mops, do_CPY, a, true, gen_helper_cpye)
TRANS_FEAT(CPYFP, aa64_mops, do_CPY, a, false, gen_helper_cpyfp)
TRANS_FEAT(CPYFM, aa64_mops, do_CPY, a, false, gen_helper_cpyfm)
TRANS_FEAT(CPYFE, aa64_mops, do_CPY, a, true, gen_helper_cpyfe)

typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);

static bool gen_rri(DisasContext *s, arg_rri_sf *a,
                    bool rd_sp, bool rn_sp, ArithTwoOp *fn)
{
    TCGv_i64 tcg_rn = rn_sp ? cpu_reg_sp(s, a->rn) : cpu_reg(s, a->rn);
    TCGv_i64 tcg_rd = rd_sp ? cpu_reg_sp(s, a->rd) : cpu_reg(s, a->rd);
    TCGv_i64 tcg_imm = tcg_constant_i64(a->imm);

    fn(tcg_rd, tcg_rn, tcg_imm);
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}

/*
 * PC-rel. addressing
 */

static bool trans_ADR(DisasContext *s, arg_ri *a)
{
    gen_pc_plus_diff(s, cpu_reg(s, a->rd), a->imm);
    return true;
}

static bool trans_ADRP(DisasContext *s, arg_ri *a)
{
    int64_t offset = (int64_t)a->imm << 12;

    /* The page offset is ok for CF_PCREL. */
    offset -= s->pc_curr & 0xfff;
    gen_pc_plus_diff(s, cpu_reg(s, a->rd), offset);
    return true;
}
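
/*
 * Worked example (illustrative): with pc_curr == 0x40000123, ADRP x0, #1
 * computes offset = (1 << 12) - 0x123, so x0 = 0x40001000 -- the base of
 * the 4KiB page one beyond the current one, independent of the low 12
 * bits of the PC.
 */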

/*
 * Add/subtract (immediate)
 */
TRANS(ADD_i, gen_rri, a, 1, 1, tcg_gen_add_i64)
TRANS(SUB_i, gen_rri, a, 1, 1, tcg_gen_sub_i64)
TRANS(ADDS_i, gen_rri, a, 0, 1, a->sf ? gen_add64_CC : gen_add32_CC)
TRANS(SUBS_i, gen_rri, a, 0, 1, a->sf ? gen_sub64_CC : gen_sub32_CC)

/*
 * Add/subtract (immediate, with tags)
 */

static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a,
                                      bool sub_op)
{
    TCGv_i64 tcg_rn, tcg_rd;
    int imm;

    imm = a->uimm6 << LOG2_TAG_GRANULE;
    if (sub_op) {
        imm = -imm;
    }

    tcg_rn = cpu_reg_sp(s, a->rn);
    tcg_rd = cpu_reg_sp(s, a->rd);

    if (s->ata[0]) {
        gen_helper_addsubg(tcg_rd, tcg_env, tcg_rn,
                           tcg_constant_i32(imm),
                           tcg_constant_i32(a->uimm4));
    } else {
        tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
        gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
    }
    return true;
}

TRANS_FEAT(ADDG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, false)
TRANS_FEAT(SUBG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, true)

/* The input should be a value in the bottom e bits (with higher
 * bits zero); returns that value replicated into every element
 * of size e in a 64 bit integer.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    while (e < 64) {
        mask |= mask << e;
        e *= 2;
    }
    return mask;
}

/*
 * Logical (immediate)
 */

/*
 * Simplified variant of pseudocode DecodeBitMasks() for the case where we
 * only require the wmask.  Returns false if the imms/immr/immn are a reserved
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each.  Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 0b11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones. */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = MAKE_64BIT_MASK(0, s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= MAKE_64BIT_MASK(0, e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
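
/*
 * Worked example (illustrative): immn=0, imms=0b111100, immr=0 gives
 * len=1, so e=2, levels=1, s=0, r=0; the 2-bit element is 0b01 and
 * replication produces 0x5555555555555555, the classic alternating-bit
 * logical immediate.
 */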

static bool gen_rri_log(DisasContext *s, arg_rri_log *a, bool set_cc,
                        void (*fn)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t imm;

    /* Some immediate field values are reserved. */
    if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
                                extract32(a->dbm, 0, 6),
                                extract32(a->dbm, 6, 6))) {
        return false;
    }
    if (!a->sf) {
        imm &= 0xffffffffull;
    }

    tcg_rd = set_cc ? cpu_reg(s, a->rd) : cpu_reg_sp(s, a->rd);
    tcg_rn = cpu_reg(s, a->rn);

    fn(tcg_rd, tcg_rn, imm);
    if (set_cc) {
        gen_logic_CC(a->sf, tcg_rd);
    }
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}

TRANS(AND_i, gen_rri_log, a, false, tcg_gen_andi_i64)
TRANS(ORR_i, gen_rri_log, a, false, tcg_gen_ori_i64)
TRANS(EOR_i, gen_rri_log, a, false, tcg_gen_xori_i64)
TRANS(ANDS_i, gen_rri_log, a, true, tcg_gen_andi_i64)

/*
 * Move wide (immediate)
 */

static bool trans_MOVZ(DisasContext *s, arg_movw *a)
{
    int pos = a->hw << 4;
    tcg_gen_movi_i64(cpu_reg(s, a->rd), (uint64_t)a->imm << pos);
    return true;
}

static bool trans_MOVN(DisasContext *s, arg_movw *a)
{
    int pos = a->hw << 4;
    uint64_t imm = a->imm;

    imm = ~(imm << pos);
    if (!a->sf) {
        imm = (uint32_t)imm;
    }
    tcg_gen_movi_i64(cpu_reg(s, a->rd), imm);
    return true;
}

static bool trans_MOVK(DisasContext *s, arg_movw *a)
{
    int pos = a->hw << 4;
    TCGv_i64 tcg_rd, tcg_im;

    tcg_rd = cpu_reg(s, a->rd);
    tcg_im = tcg_constant_i64(a->imm);
    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_im, pos, 16);
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}
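
/*
 * For example (illustrative): MOVN x0, #0x1234, LSL #16 computes
 * ~(0x1234 << 16) = 0xffffffffedcbffff, while the 32-bit form
 * MOVN w0, #0x1234, LSL #16 truncates that to 0xedcbffff.
 */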

static bool trans_SBFM(DisasContext *s, arg_SBFM *a)
{
    TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
    TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
    unsigned int bitsize = a->sf ? 64 : 32;
    unsigned int ri = a->immr;
    unsigned int si = a->imms;
    unsigned int pos, len;

    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
        if (!a->sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);

        if (len < ri) {
            /*
             * Sign extend the destination field from len to fill the
             * balance of the word.  Let the deposit below insert all
             * of those sign bits.
             */
            tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
            len = ri;
        }

        /*
         * We start with zero, and we haven't modified any bits outside
         * bitsize, therefore no final zero-extension is needed for !sf.
         */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
    }
    return true;
}

static bool trans_UBFM(DisasContext *s, arg_UBFM *a)
{
    TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
    TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
    unsigned int bitsize = a->sf ? 64 : 32;
    unsigned int ri = a->immr;
    unsigned int si = a->imms;
    unsigned int pos, len;

    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
    }
    return true;
}

static bool trans_BFM(DisasContext *s, arg_BFM *a)
{
    TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
    TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
    unsigned int bitsize = a->sf ? 64 : 32;
    unsigned int ri = a->immr;
    unsigned int si = a->imms;
    unsigned int pos, len;

    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        len = (si - ri) + 1;
        pos = 0;
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}
static bool trans_EXTR(DisasContext *s, arg_extract *a)
{
    TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

    tcg_rd = cpu_reg(s, a->rd);

    if (unlikely(a->imm == 0)) {
        /*
         * tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
         * so an extract from bit 0 is a special case.
         */
        if (a->sf) {
            tcg_gen_mov_i64(tcg_rd, cpu_reg(s, a->rm));
        } else {
            tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, a->rm));
        }
    } else {
        tcg_rm = cpu_reg(s, a->rm);
        tcg_rn = cpu_reg(s, a->rn);

        if (a->sf) {
            /* Specialization to ROR happens in EXTRACT2. */
            tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, a->imm);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();

            tcg_gen_extrl_i64_i32(t0, tcg_rm);
            if (a->rm == a->rn) {
                tcg_gen_rotri_i32(t0, t0, a->imm);
            } else {
                TCGv_i32 t1 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(t1, tcg_rn);
                tcg_gen_extract2_i32(t0, t0, t1, a->imm);
            }
            tcg_gen_extu_i32_i64(tcg_rd, t0);
        }
    }
    return true;
}

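/*
 * Worked example (added commentary): "ROR w0, w1, #8" is the alias
 * EXTR w0, w1, w1, #8; because a->rm == a->rn we use the cheaper
 * 32-bit rotri above instead of a two-input extract2.
 */
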
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t0, src);
            tcg_gen_extrl_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}

/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
    }
}

/* Logical (shifted register)
 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE);
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}

/*
 * Add/subtract (extended register)
 *
 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * opt: 00
 * option: extension type (see DecodeRegExtend)
 * imm3: optional shift to Rm
 *
 * Rd = Rn + LSL(extend(Rm), amount)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4 || opt != 0) {
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }
}

/*
 * Add/subtract (shifted register)
 *
 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
 *  imm6: Shift amount to apply to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }
}

/* Data-processing (3 source)
 *
 *    31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
 *  +--+------+-----------+------+------+----+------+------+------+
 *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 *  +--+------+-----------+------+------+----+------+------+------+
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }
}

/* Add/subtract (with carry)
 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9    5 4   0
 * +--+--+--+------------------------+------+-------------+------+-----+
 * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | 0 0 0 0 0 0 |  Rn  |  Rd |
 * +--+--+--+------------------------+------+-------------+------+-----+
 */

static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        tcg_y = tcg_temp_new_i64();
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}

/*
 * Rotate right into flags
 *  31 30 29                21       15          10      5  4      0
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1 |  Rn  |o2| mask |
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 */
static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
{
    int mask = extract32(insn, 0, 4);
    int o2 = extract32(insn, 4, 1);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 15, 6);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i64 tcg_rn;
    TCGv_i32 nzcv;

    if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, 1);
    tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);

    nzcv = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(nzcv, tcg_rn);

    if (mask & 8) { /* N */
        tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
    }
    if (mask & 4) { /* Z */
        tcg_gen_not_i32(cpu_ZF, nzcv);
        tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
    }
    if (mask & 2) { /* C */
        tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
    }
    if (mask & 1) { /* V */
        tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
    }
}

/*
 * Evaluate into flags
 *  31 30 29                21        15   14        10      5  4      0
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 |  Rn  |o3| mask |
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 */
static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
{
    int o3_mask = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int o2 = extract32(insn, 15, 6);
    int sz = extract32(insn, 14, 1);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i32 tmp;
    int shift;

    if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
        !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }
    shift = sz ? 16 : 24;  /* SETF16 or SETF8 */

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
    tcg_gen_shli_i32(cpu_NF, tmp, shift);
    tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
}

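/*
 * Worked example (added commentary): for SETF8 (sz == 0, shift == 24)
 * with W(rn) = 0x80, NF = 0x80 << 24 has bit 31 set, so N = 1;
 * ZF = NF is nonzero, so Z is clear; VF = (0x80 << 23) ^ NF has bit 31
 * set, i.e. V = operand<8> ^ operand<7> = 1.  C is left unchanged,
 * as the architecture requires.
 */
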
/* Conditional compare (immediate / register)
 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND.  */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);

    /* Load the arguments for the new comparison.  */
    if (is_imm) {
        tcg_y = tcg_temp_new_i64();
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison.  */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }

    /* If COND was false, force the flags to #nzcv.  Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, we can make do with just T1.
     * In either case, allow the tcg optimizer to delete any unused mask.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
}

/* Conditional select
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 * +----+----+---+-----------------+------+------+-----+------+------+
 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+---+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_constant_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET & CSETM.  */
        if (else_inv) {
            tcg_gen_negsetcond_i64(tcg_invert_cond(c.cond),
                                   tcg_rd, c.value, zero);
        } else {
            tcg_gen_setcond_i64(tcg_invert_cond(c.cond),
                                tcg_rd, c.value, zero);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        if (else_inv && else_inc) {
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) {
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) {
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

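/*
 * Worked example (added commentary): "CINC w0, w1, eq" is the alias
 * CSINC w0, w1, w1, ne (else_inv = 0, else_inc = 1).  With
 * rn == rm != 31 we take the general path: t_false = W(w1) + 1, and
 * the movcond selects W(w1) unchanged when NE holds, the incremented
 * value otherwise.
 */
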
static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

/* REV with sf==1, opcode==3 ("REV64") */
static void handle_rev64(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    if (!sf) {
        unallocated_encoding(s);
        return;
    }
    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
}

/* REV with sf==0, opcode==2
 * REV32 (sf==1, opcode==2)
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
        tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
    } else {
        tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
    }
}

/* REV16 (opcode==1) */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
}

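/*
 * Worked example (added commentary): for X(rn) = 0x1122334455667788
 * the sequence above computes ((x & m) << 8) | ((x >> 8) & m) with
 * m = 0x00ff00ff00ff00ff, giving 0x2211443366558877: each 16-bit lane
 * is byte-swapped independently.
 */
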
/* Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, opcode2, rn, rd;
    TCGv_i64 tcg_rd;

    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    opcode2 = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))

    switch (MAP(sf, opcode2, opcode)) {
    case MAP(0, 0x00, 0x00): /* RBIT */
    case MAP(1, 0x00, 0x00):
        handle_rbit(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x01): /* REV16 */
    case MAP(1, 0x00, 0x01):
        handle_rev16(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x02): /* REV/REV32 */
    case MAP(1, 0x00, 0x02):
        handle_rev32(s, sf, rn, rd);
        break;
    case MAP(1, 0x00, 0x03): /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x04): /* CLZ */
    case MAP(1, 0x00, 0x04):
        handle_clz(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x05): /* CLS */
    case MAP(1, 0x00, 0x05):
        handle_cls(s, sf, rn, rd);
        break;
    case MAP(1, 0x01, 0x00): /* PACIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x01): /* PACIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x02): /* PACDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x03): /* PACDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x04): /* AUTIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x05): /* AUTIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x06): /* AUTDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x07): /* AUTDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x08): /* PACIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x09): /* PACIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0a): /* PACDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0b): /* PACDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0c): /* AUTIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0d): /* AUTIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0e): /* AUTDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0f): /* AUTDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x10): /* XPACI */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpaci(tcg_rd, tcg_env, tcg_rd);
        }
        break;
    case MAP(1, 0x01, 0x11): /* XPACD */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpacd(tcg_rd, tcg_env, tcg_rd);
        }
        break;
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }

#undef MAP
}

static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        tcg_n = tcg_temp_new_i64();
        tcg_m = tcg_temp_new_i64();
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

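/*
 * Added commentary: the division helpers implement the AArch64 rules
 * directly -- division by zero yields 0 rather than trapping, and the
 * one signed overflow case (INT_MIN / -1) wraps.  That is why a 32-bit
 * SDIV needs the explicit sign-extensions above before the 64-bit
 * helper runs, with the final zero-extension restoring W-register form.
 */
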
/* LSLV, LSRV, ASRV, RORV */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
}

/* CRC32[BHWX], CRC32C[BHWX] */
static void handle_crc32(DisasContext *s,
                         unsigned int sf, unsigned int sz, bool crc32c,
                         unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_acc, tcg_val;
    TCGv_i32 tcg_bytes;

    if (!dc_isar_feature(aa64_crc32, s)
        || (sf == 1 && sz != 3)
        || (sf == 0 && sz == 3)) {
        unallocated_encoding(s);
        return;
    }

    if (sz == 3) {
        tcg_val = cpu_reg(s, rm);
    } else {
        uint64_t mask;
        switch (sz) {
        case 0:
            mask = 0xFF;
            break;
        case 1:
            mask = 0xFFFF;
            break;
        case 2:
            mask = 0xFFFFFFFF;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_val = tcg_temp_new_i64();
        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
    }

    tcg_acc = cpu_reg(s, rn);
    tcg_bytes = tcg_constant_i32(1 << sz);

    if (crc32c) {
        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    } else {
        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    }
}

/* Data-processing (2 source)
 *   31   30  29 28             21 20  16 15    10 9    5 4    0
 * +----+---+---+-----------------+------+--------+------+------+
 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd, setflag;
    sf = extract32(insn, 31, 1);
    setflag = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (setflag && opcode != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SUBP(S) */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 tcg_n, tcg_m, tcg_d;

            tcg_n = read_cpu_reg_sp(s, rn, true);
            tcg_m = read_cpu_reg_sp(s, rm, true);
            tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
            tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
            tcg_d = cpu_reg(s, rd);

            if (setflag) {
                gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
            } else {
                tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
            }
        }
        break;
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 4: /* IRG */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        }
        if (s->ata[0]) {
            gen_helper_irg(cpu_reg_sp(s, rd), tcg_env,
                           cpu_reg_sp(s, rn), cpu_reg(s, rm));
        } else {
            gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
                                             cpu_reg_sp(s, rn));
        }
        break;
    case 5: /* GMI */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 t = tcg_temp_new_i64();

            tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4);
            tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
            tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t);
        }
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 12: /* PACGA */
        if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        gen_helper_pacga(cpu_reg(s, rd), tcg_env,
                         cpu_reg(s, rn), cpu_reg_sp(s, rm));
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 */
    {
        int sz = extract32(opcode, 0, 2);
        bool crc32c = extract32(opcode, 2, 1);
        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
        break;
    }
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}

/*
 * Data processing - register
 *  31  30 29  28      25    21  20  16      10         0
 * +--+---+--+---+-------+-----+-------+-------+---------+
 * |  |op0|  |op1| 1 0 1 | op2 |       |  op3  |         |
 * +--+---+--+---+-------+-----+-------+-------+---------+
 */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 30, 1);
    int op1 = extract32(insn, 28, 1);
    int op2 = extract32(insn, 21, 4);
    int op3 = extract32(insn, 10, 6);

    if (!op1) {
        if (op2 & 8) {
            if (op2 & 1) {
                /* Add/sub (extended register) */
                disas_add_sub_ext_reg(s, insn);
            } else {
                /* Add/sub (shifted register) */
                disas_add_sub_reg(s, insn);
            }
        } else {
            /* Logical (shifted register) */
            disas_logic_reg(s, insn);
        }
        return;
    }

    switch (op2) {
    case 0x0:
        switch (op3) {
        case 0x00: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;

        case 0x01: /* Rotate right into flags */
        case 0x21:
            disas_rotate_right_into_flags(s, insn);
            break;

        case 0x02: /* Evaluate into flags */
        case 0x12:
        case 0x22:
        case 0x32:
            disas_evaluate_into_flags(s, insn);
            break;

        default:
            goto do_unallocated;
        }
        break;

    case 0x2: /* Conditional compare */
        disas_cc(s, insn); /* both imm and reg forms */
        break;

    case 0x4: /* Conditional select */
        disas_cond_select(s, insn);
        break;

    case 0x6: /* Data-processing */
        if (op0) {    /* (1 source) */
            disas_data_proc_1src(s, insn);
        } else {      /* (2 source) */
            disas_data_proc_2src(s, insn);
        }
        break;
    case 0x8 ... 0xf: /* (3 source) */
        disas_data_proc_3src(s, insn);
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}

static void handle_fp_compare(DisasContext *s, int size,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (size == MO_64) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_constant_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
    } else {
        TCGv_i32 tcg_vn = tcg_temp_new_i32();
        TCGv_i32 tcg_vm = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_vn, rn, 0, size);
        if (cmp_with_zero) {
            tcg_gen_movi_i32(tcg_vm, 0);
        } else {
            read_vec_element_i32(s, tcg_vm, rm, 0, size);
        }

        switch (size) {
        case MO_32:
            if (signal_all_nans) {
                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        case MO_16:
            if (signal_all_nans) {
                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    gen_set_nzcv(tcg_flags);
}

/* Floating point compare
 *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    if (mos || op || op2r) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
}

/* Floating point conditional compare
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGLabel *label_continue = NULL;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cond < 0x0e) { /* not always */
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        gen_set_nzcv(tcg_constant_i64(nzcv << 28));
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, size, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}

/* Floating point conditional select
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 1 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false;
    DisasCompare64 c;
    MemOp sz;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Zero extend sreg & hreg inputs to 64 bits now.  */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, sz);
    read_vec_element(s, t_false, rm, 0, sz);

    a64_test_cc(&c, cond);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
                        t_true, t_false);

    /* Note that sregs & hregs write back zeros to the high bits,
       and we've already done the zero-extension.  */
    write_fp_dreg(s, rd, t_true);
}

/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
        break;
    case 0x2: /* FNEG */
        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
        break;
    case 0x3: /* FSQRT */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode;

        fpst = fpstatus_ptr(FPST_FPCR_F16);
        tcg_rmode = gen_set_rmode(opcode & 7, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
        break;
    }
    case 0xe: /* FRINTX */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
    TCGv_i32 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;

    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        goto done;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env);
        goto done;
    case 0x6: /* BFCVT */
        gen_fpst = gen_helper_bfcvt;
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = opcode & 7;
        gen_fpst = gen_helper_rints;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rints_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rints;
        break;
    case 0x10: /* FRINT32Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x12: /* FRINT64Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint64_s;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_s;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }

 done:
    write_fp_sreg(s, rd, tcg_res);
}

/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
    TCGv_i64 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;

    switch (opcode) {
    case 0x0: /* FMOV */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, tcg_env);
        goto done;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = opcode & 7;
        gen_fpst = gen_helper_rintd;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rintd_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rintd;
        break;
    case 0x10: /* FRINT32Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x12: /* FRINT64Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint64_d;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_d;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }

 done:
    write_fp_dreg(s, rd, tcg_res);
}

static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, tcg_env);
            write_fp_dreg(s, rd, tcg_rd);
        } else {
            /* Single to half */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            TCGv_i32 ahp = get_ahp_flag();
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
            write_fp_sreg(s, rd, tcg_rd);
        }
        break;
    }
    case 0x1:
    {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, tcg_env);
        } else {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            TCGv_i32 ahp = get_ahp_flag();
            /* Double to half */
            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
        }
        write_fp_sreg(s, rd, tcg_rd);
        break;
    }
    case 0x3:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 tcg_ahp = get_ahp_flag();
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_sreg(s, rd, tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_dreg(s, rd, tcg_rd);
        }
        break;
    }
    default:
        g_assert_not_reached();
    }
}

/* Floating point data-processing (1 source)
 *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (mos) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }

    case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
        if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
            goto do_unallocated;
        }
        /* fall through */
    case 0x0 ... 0x3:
    case 0x8 ... 0xc:
    case 0xe ... 0xf:
        /* 32-to-32 and 64-to-64 ops */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }

            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    case 0x6:
        switch (type) {
        case 1: /* BFCVT */
            if (!dc_isar_feature(aa64_bf16, s)) {
                goto do_unallocated;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}

/* Floating-point data-processing (2 source) - single precision */
static void handle_fp_2src_single(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negs(tcg_res, tcg_res);
        break;
    }

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating-point data-processing (2 source) - double precision */
static void handle_fp_2src_double(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i64();
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negd(tcg_res, tcg_res);
        break;
    }

    write_fp_dreg(s, rd, tcg_res);
}

/* Floating-point data-processing (2 source) - half precision */
static void handle_fp_2src_half(DisasContext *s, int opcode,
                                int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating point data-processing (2 source)
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_fp_2src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int opcode = extract32(insn, 12, 4);

    if (opcode > 8 || mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_single(s, opcode, rd, rn, rm);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_double(s, opcode, rd, rn, rm);
        break;
    case 3:
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_half(s, opcode, rd, rn, rm);
        break;
    default:
        unallocated_encoding(s);
    }
}

/* Floating-point data-processing (3 source) - single precision */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negs(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating-point data-processing (3 source) - double precision */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);
}

/* Floating-point data-processing (3 source) - half precision */
static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
                                int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_op3 = read_fp_hreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
    }

    if (o0 != o1) {
        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
    }

    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating point data-processing (3 source)
 *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 */
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    bool o0 = extract32(insn, 15, 1);
    bool o1 = extract32(insn, 21, 1);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
        break;
    case 3:
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
        break;
    default:
        unallocated_encoding(s);
    }
}

/* Floating point immediate
 *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm5 = extract32(insn, 5, 5);
    int imm8 = extract32(insn, 13, 8);
    int type = extract32(insn, 22, 2);
    int mos = extract32(insn, 29, 3);
    uint64_t imm;
    MemOp sz;

    if (mos || imm5) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    imm = vfp_expand_imm(sz, imm8);
    write_fp_dreg(s, rd, tcg_constant_i64(imm));
}

/* Handle floating point <=> fixed point conversions. Note that we can
 * also deal with fp <=> integer conversions as a special case (scale == 64)
 * OPTME: consider handling that special case specially or at least skipping
 * the call to scalbn in the helpers for zero shifts.
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);

    tcg_shift = tcg_constant_i32(64 - scale);

    if (itof) {
        TCGv_i64 tcg_int = cpu_reg(s, rn);

        if (!sf) {
            TCGv_i64 tcg_extend = tcg_temp_new_i64();

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* float64 */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so FCVTA[US] is a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* float64 */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            break;

        case 0: /* float32 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
            }
            break;

        case 3: /* float16 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
            }
            break;

        default:
            g_assert_not_reached();
        }

        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}

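/*
 * Note on the shift handling above: the tcg_shift passed to the helpers
 * is 64 - scale, i.e. the number of fractional bits, since (per the ARM
 * ARM encoding of fbits for the fixed-point converts) the instruction's
 * scale field holds 64 - fbits. A scale of 64 therefore gives a shift of
 * zero, which is how the plain FP <-> integer conversions reuse this path.
 */
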
/* Floating point <-> fixed point conversions
 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    if (sbit || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* float32 */
    case 1: /* float64 */
        break;
    case 3: /* float16 */
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fall through */
    default:
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}

static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion.
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half. */
            tcg_gen_st_i64(tcg_rn, tcg_env, fp_reg_hi_offset(s, rd));
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half */
            tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}

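/*
 * For reference, the type field above selects the FP width: 0 is a
 * single-precision S register, 1 a double-precision D register, 2 the
 * upper 64 bits of the 128-bit vector register (e.g. FMOV Xd, Vn.D[1]),
 * and 3 a half-precision H register.
 */
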
static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
{
    TCGv_i64 t = read_fp_dreg(s, rn);
    TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);

    gen_helper_fjcvtzs(t, t, fpstatus);

    tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
    tcg_gen_extrh_i64_i32(cpu_ZF, t);
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

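/*
 * The fjcvtzs helper packs the 32-bit converted result into the low half
 * of its return value and the ZF representation of the Z flag into the
 * high half, which is why the code above splits the two halves: the low
 * part is zero-extended into Xd and the high part becomes ZF, with N, C
 * and V cleared since FJCVTZS sets NZCV to 0Z00.
 */
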
/* Floating point <-> integer conversions
 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof = false;

    if (sbit) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 2: /* SCVTF */
    case 3: /* UCVTF */
        itof = true;
        /* fall through */
    case 4: /* FCVTAS */
    case 5: /* FCVTAU */
        if (rmode != 0) {
            goto do_unallocated;
        }
        /* fall through */
    case 0: /* FCVT[NPMZ]S */
    case 1: /* FCVT[NPMZ]U */
        switch (type) {
        case 0: /* float32 */
        case 1: /* float64 */
            break;
        case 3: /* float16 */
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            break;
        default:
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
        break;

    default:
        switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
        case 0b01100110: /* FMOV half <-> 32-bit int */
        case 0b01100111:
        case 0b11100110: /* FMOV half <-> 64-bit int */
        case 0b11100111:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            /* fall through */
        case 0b00000110: /* FMOV 32-bit */
        case 0b00000111:
        case 0b10100110: /* FMOV 64-bit */
        case 0b10100111:
        case 0b11001110: /* FMOV top half of 128-bit */
        case 0b11001111:
            if (!fp_access_check(s)) {
                return;
            }
            itof = opcode & 1;
            handle_fmov(s, rd, rn, type, itof);
            break;

        case 0b00111110: /* FJCVTZS */
            if (!dc_isar_feature(aa64_jscvt, s)) {
                goto do_unallocated;
            } else if (fp_access_check(s)) {
                handle_fjcvtzs(s, rd, rn);
            }
            break;

        default:
        do_unallocated:
            unallocated_encoding(s);
            return;
        }
        break;
    }
}

/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
 *   31  30  29 28     25 24                          0
 * +---+---+---+---------+-----------------------------+
 * |   | 0 |   | 1 1 1 1 |                             |
 * +---+---+---+---------+-----------------------------+
 */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        /* Floating point data-processing (3 source) */
        disas_fp_3src(s, insn);
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            disas_fp_2src(s, insn);
            break;
        case 3:
            /* Floating point conditional select */
            disas_fp_csel(s, insn);
            break;
        case 0:
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}

static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
                     int pos)
{
    /* Extract 64 bits from the middle of two concatenated 64 bit
     * vector register slices left:right. The extracted bits start
     * at 'pos' bits into the right (least significant) side.
     * We return the result in tcg_right, and guarantee not to
     * trash tcg_left.
     */
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    assert(pos > 0 && pos < 64);

    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
}

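/*
 * Worked example for do_ext64(): with pos == 24 the result is
 * (right >> 24) | (left << 40), i.e. the 64 bits that start 24 bits
 * into the left:right concatenation.
 */
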
/* EXT
 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3; /* extract position is in bits */
    TCGv_i64 tcg_resl, tcg_resh;

    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Vd gets bits starting at pos bits into Vm:Vn. This is
     * either extracting 128 bits from a 128:128 concatenation, or
     * extracting 64 bits from a 64:64 concatenation.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        if (pos >= 64) {
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
    }
    clear_vec_high(s, is_q, rd);
}

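/*
 * Worked example: a 64-bit EXT with imm4 == 3 gives pos == 24, so the
 * result is bytes 3..7 of Vn followed by bytes 0..2 of Vm, matching the
 * "extract from the middle of Vm:Vn" description above.
 */
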
/* TBL/TBX
 *   31  30 29         24 23 22  21 20  16 15 14 13  12  11 10 9    5 4    0
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tbx = extract32(insn, 12, 1);
    int len = (extract32(insn, 13, 2) + 1) * 16;

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rm), tcg_env,
                       is_q ? 16 : 8, vec_full_reg_size(s),
                       (len << 6) | (is_tbx << 5) | rn,
                       gen_helper_simd_tblx);
}

/* ZIP/UZP/TRN
 *   31  30 29         24 23  22  21 20   16 15 14 12 11 10 9    5 4    0
 * +---+---+-------------+------+---+------+---+------------------+------+
 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
 * +---+---+-------------+------+---+------+---+------------------+------+
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
     * bit 2 indicates 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res[2], tcg_ele;

    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL;
    tcg_ele = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        int o, w;

        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_ele, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_ele, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_ele, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_ele, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        w = (i * esize) / 64;
        o = (i * esize) % 64;

        if (o == 0) {
            tcg_gen_mov_i64(tcg_res[w], tcg_ele);
        } else {
            tcg_gen_shli_i64(tcg_ele, tcg_ele, o);
            tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele);
        }
    }

    for (i = 0; i <= is_q; ++i) {
        write_vec_element(s, tcg_res[i], rd, i, MO_64);
    }
    clear_vec_high(s, is_q, rd);
}

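/*
 * Worked example: UZP1 with four lanes (part == 0) reads Vn[0], Vn[2],
 * Vm[0], Vm[2]; UZP2 (part == 1) reads the odd-numbered lanes instead.
 */
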
/*
 * do_reduction_op helper
 *
 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
 * important for correct NaN propagation that we do these
 * operations in exactly the order specified by the pseudocode.
 *
 * This is a recursive function, TCG temps should be freed by the
 * calling function once it is done with the values.
 */
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        int element;
        MemOp msize = esize == 16 ? MO_16 : MO_32;
        TCGv_i32 tcg_elem;

        /* We should have one register left here */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }
        return tcg_res;
    }
}

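/*
 * Worked example of the vmap recursion above: a full 4-element vector
 * starts with vmap == 0b1111; ctpop8(vmap) / 2 == 2, so vmap_lo becomes
 * 0b0011 and vmap_hi becomes 0b1100, and each half is reduced recursively
 * before the two partial results are combined, exactly as Reduce() pairs
 * elements.
 */
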
/* AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of size field encodes min vs max and the actual size
         * depends on the encoding of the U bit. If not set (and FP16
         * enabled) then we do half-precision float instead of single
         * precision.
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && dc_isar_feature(aa64_fp16, s)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector
     * to produce a single result. We can guarantee that a 64
     * bit intermediate is sufficient:
     * + for [US]ADDLV the maximum element size is 32 bits, and
     *   the result type is 64 bits
     * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
     *   same as the element size, which is 32 bits at most
     * For the integer operations we can choose to work at 64
     * or 32 bits and truncate at the end; for simplicity
     * we use 64 bits always. The floating point
     * ops do require 32 bit intermediates, though.
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
    } else {
        /* Floating point vector reduction ops which work across 32
         * bit (single) or 16 bit (half-precision) intermediates.
         * Note that correct NaN propagation requires that we do these
         * operations in exactly the order specified by the pseudocode.
         */
        TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
    }

    /* Now truncate the result to the width required for the final output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2*esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
}

/* DUP (Element, Vector)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    int index;

    if (size > 3 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);
    tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
                         vec_reg_offset(s, rn, index, size),
                         is_q ? 16 : 8, vec_full_reg_size(s));
}

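/*
 * Worked example: imm5 == 0b00110 gives size == 1 (16-bit elements) and
 * index == imm5 >> 2 == 1, so the instruction replicates Vn.H[1] across
 * the destination.
 */
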
/* DUP (element, scalar)
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 */
static void handle_simd_dupes(DisasContext *s, int rd, int rn,
                              int imm5)
{
    int size = ctz32(imm5);
    int index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);

    /* This instruction just extracts the specified element and
     * zero-extends it into the bottom of the destination register.
     */
    tmp = tcg_temp_new_i64();
    read_vec_element(s, tmp, rn, index, size);
    write_fp_dreg(s, rd, tmp);
}

/* DUP (General)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    uint32_t dofs, oprsz, maxsz;

    if (size > 3 || ((size == 3) && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dofs = vec_full_reg_offset(s, rd);
    oprsz = is_q ? 16 : 8;
    maxsz = vec_full_reg_size(s);

    tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
}

/* INS (Element)
 *
 *  31                   21 20    16 15  14    11  10 9    5 4    0
 * +-----------------------+--------+------------+---+------+------+
 * | 0 1 1 0 1 1 1 0 0 0 0 |  imm5  | 0 |  imm4  | 1 |  Rn  |  Rd  |
 * +-----------------------+--------+------------+---+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_inse(DisasContext *s, int rd, int rn,
                             int imm4, int imm5)
{
    int size = ctz32(imm5);
    int src_index, dst_index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dst_index = extract32(imm5, 1 + size, 5);
    src_index = extract32(imm4, size, 4);

    tmp = tcg_temp_new_i64();

    read_vec_element(s, tmp, rn, src_index, size);
    write_vec_element(s, tmp, rd, dst_index, size);

    /* INS is considered a 128-bit write for SVE. */
    clear_vec_high(s, true, rd);
}

/* INS (General)
 *
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 1 1 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
{
    int size = ctz32(imm5);
    int idx;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    idx = extract32(imm5, 1 + size, 4 - size);
    write_vec_element(s, cpu_reg(s, rn), rd, idx, size);

    /* INS is considered a 128-bit write for SVE. */
    clear_vec_high(s, true, rd);
}

/* UMOV (General)
 * SMOV (General)
 *
 *  31  30   29              21 20    16 15    12   10 9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 1 U 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * U: unsigned when set
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
                                  int rn, int rd, int imm5)
{
    int size = ctz32(imm5);
    int element;
    TCGv_i64 tcg_rd;

    /* Check for UnallocatedEncodings */
    if (is_signed) {
        if (size > 2 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        if (size > 3
            || (size < 3 && is_q)
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    element = extract32(imm5, 1 + size, 4);

    tcg_rd = cpu_reg(s, rd);
    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
    if (is_signed && !is_q) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

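/*
 * The final ext32u above handles SMOV Wd: the element is read with
 * MO_SIGN into the full 64-bit CPU register, so for the 32-bit form the
 * upper half must then be zeroed to give the usual W-register result.
 */
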
/* AdvSIMD copy
 *   31  30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +---+---+----+-----------------+------+---+------+---+------+------+
 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +---+---+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int op = extract32(insn, 29, 1);
    int is_q = extract32(insn, 30, 1);
    int imm5 = extract32(insn, 16, 5);

    if (op) {
        if (is_q) {
            /* INS (element) */
            handle_simd_inse(s, rd, rn, imm4, imm5);
        } else {
            unallocated_encoding(s);
        }
    } else {
        switch (imm4) {
        case 0:
            /* DUP (element - vector) */
            handle_simd_dupe(s, is_q, rd, rn, imm5);
            break;
        case 1:
            /* DUP (general) */
            handle_simd_dupg(s, is_q, rd, rn, imm5);
            break;
        case 3:
            if (is_q) {
                /* INS (general) */
                handle_simd_insg(s, rd, rn, imm5);
            } else {
                unallocated_encoding(s);
            }
            break;
        case 5:
        case 7:
            /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
            handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
    }
}

/* AdvSIMD modified immediate
 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 *
 * There are a number of operations that can be carried out here:
 *   MOVI - move (shifted) imm into register
 *   MVNI - move inverted (shifted) imm into register
 *   ORR  - bitwise OR of (shifted) imm with register
 *   BIC  - bitwise clear of (shifted) imm with register
 * With ARMv8.2 we also have:
 *   FMOV half-precision
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        /* Check for FMOV (vector, immediate) - half-precision */
        if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cmode == 15 && o2 && !is_neg) {
        /* FMOV (vector, immediate) - half-precision */
        imm = vfp_expand_imm(MO_16, abcdefgh);
        /* now duplicate across the lanes */
        imm = dup_const(MO_16, imm);
    } else {
        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
    }

    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
        /* MOVI or MVNI, with MVNI negation handled above. */
        tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
                             vec_full_reg_size(s), imm);
    } else {
        /* ORR or BIC, with BIC negation to AND handled above. */
        if (is_neg) {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
        } else {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
        }
    }
}

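/*
 * The cmode tests above pick out the ORR/BIC encodings: (cmode & 0x9)
 * == 0x1 matches the 32-bit shifted-immediate forms (cmode 0xx1), and
 * (cmode & 0xd) == 0x9 matches the 16-bit forms (cmode 10x1); everything
 * else is a MOVI/MVNI/FMOV variant handled by the dup path.
 */
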
/* AdvSIMD scalar copy
 *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +-----+----+-----------------+------+---+------+---+------+------+
 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +-----+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int imm5 = extract32(insn, 16, 5);
    int op = extract32(insn, 29, 1);

    if (op != 0 || imm4 != 0) {
        unallocated_encoding(s);
        return;
    }

    /* DUP (element, scalar) */
    handle_simd_dupes(s, rd, rn, imm5);
}

/* AdvSIMD scalar pairwise
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but size[1] is always 1 for valid
     * encodings.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        fpst = NULL;
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op, size[0] is 32 or 64 bit */
        if (!u) {
            if (!dc_isar_feature(aa64_fp16, s)) {
                unallocated_encoding(s);
                return;
            } else {
                size = MO_16;
            }
        } else {
            size = extract32(size, 0, 1) ? MO_64 : MO_32;
        }

        if (!fp_access_check(s)) {
            return;
        }

        fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == MO_64) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, size);
        read_vec_element_i32(s, tcg_op2, rn, 1, size);

        if (size == MO_16) {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        write_fp_sreg(s, rd, tcg_res);
    }
}

/*
 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 *
 * This code handles the common shifting logic and is used by both
 * the vector and scalar code.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_constant_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_res */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }
}

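/*
 * The extended_result path above exists because adding the rounding
 * constant to a 64-bit source can carry out of 64 bits. The add2 keeps
 * that 65th bit in tcg_src_hi, and the final shift is then reassembled
 * as (src >> shift) | (src_hi << (64 - shift)).
 */
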
/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* shift count same as element size is valid but does nothing;
         * special case to avoid potential shift by 64.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);
}

/* SHL/SLI - Scalar shift left */
static void handle_scalar_simd_shli(DisasContext *s, bool insert,
                                    int immh, int immb, int opcode,
                                    int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
    } else {
        tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);
}

/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
 * (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_temp_new_i64();

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size + 1, shift);
        narrowfn(tcg_rd_narrowed, tcg_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        if (i == 0) {
            tcg_gen_mov_i64(tcg_final, tcg_rd);
        } else {
            tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
        }
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    clear_vec_high(s, is_q, rd);
}

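/*
 * Each narrowed element above is deposited into tcg_final at bit offset
 * esize * i, and the assembled 64 bits are written to the low half of Vd
 * for the base insns or to the high half for the "2" (second-part)
 * variants, which is what the is_q test selects.
 */
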
/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar && !is_q && extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* Since we use the variable-shift helpers we must
     * replicate the shift count into each element of
     * the tcg_shift value.
     */
    switch (size) {
    case 0:
        shift |= shift << 8;
        /* fall through */
    case 1:
        shift |= shift << 16;
        break;
    case 2:
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_shift = tcg_constant_i64(shift);
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, tcg_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_constant_i32(shift);
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        MemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, tcg_env, tcg_op, tcg_shift);
            if (scalar) {
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }
        }

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}

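/*
 * The shift replication above is needed because the 8- and 16-bit Neon
 * helpers operate on several lanes packed into one 32-bit value, so the
 * immediate shift count must appear in every lane of the shift operand.
 */
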
/* Common vector code for handling integer to FP conversion */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    TCGv_i32 tcg_shift = NULL;

    MemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    if (fracbits || size == MO_64) {
        tcg_shift = tcg_constant_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }
    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }
    }

    clear_vec_high(s, elements << size == 16, rd);
}

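/*
 * The "elements << size == 16" test above reconstructs is_q: it is true
 * exactly when the operation filled all 16 bytes of the vector, e.g.
 * 4 elements of MO_32 or 2 elements of MO_64.
 */
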
/* UCVTF/SCVTF - Integer to FP conversion */
static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int opcode,
                                         int rn, int rd)
{
    int size, elements, fracbits;
    int immhb = immh << 3 | immb;

    if (immh & 8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 4) {
        size = MO_32;
    } else if (immh & 2) {
        size = MO_16;
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* immh == 0 would be a failure of the decode logic */
        g_assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (is_scalar) {
        elements = 1;
    } else {
        elements = (8 << is_q) >> size;
    }
    fracbits = (16 << size) - immhb;

    if (!fp_access_check(s)) {
        return;
    }

    handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
}

/* FCVTZS, FCVTZU - FP to fixed point conversion */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int pass, size, fracbits;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    if (immh & 0x8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 0x4) {
        size = MO_32;
    } else if (immh & 0x2) {
        size = MO_16;
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* Should have split out AdvSIMD modified immediate earlier. */
        assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus);
    fracbits = (16 << size) - immhb;
    tcg_shift = tcg_constant_i32(fracbits);

    if (size == MO_64) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
        int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);

        switch (size) {
        case MO_16:
            if (is_u) {
                fn = gen_helper_vfp_touhh;
            } else {
                fn = gen_helper_vfp_toshh;
            }
            break;
        case MO_32:
            if (is_u) {
                fn = gen_helper_vfp_touls;
            } else {
                fn = gen_helper_vfp_tosls;
            }
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, size);
            fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, size);
            }
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}

/* AdvSIMD scalar shift by immediate
 *  31 30  29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +-----+---+-------------+------+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-------------+------+------+--------+---+------+------+
 *
 * This is the scalar version, so it works on fixed size registers.
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF, UCVTF */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN, SQSHRUN2 */
    case 0x11: /* SQRSHRUN, SQRSHRUN2 */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
    case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS, FCVTZU */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* AdvSIMD scalar three different
 *  31 30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL, SQDMLAL2 */
    case 0xb: /* SQDMLSL, SQDMLSL2 */
    case 0xd: /* SQDMULL, SQDMULL2 */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s64(tcg_res, tcg_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, tcg_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);
    } else {
        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, tcg_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, tcg_env,
                                              tcg_res, tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);
    }
}

static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Handle 64x64->64 opcodes which are shared between the scalar
     * and vector 3-same groups. We cover every opcode where size == 3
     * is valid in either the three-reg-same (integer, not pairwise)
     * or scalar-three-reg-same groups.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT, CMHI */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        /* 64 bit integer comparison, result = test ? -1 : 0. */
        tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x7: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST, CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* SSHL, USHL */
        if (u) {
            gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL, UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL, URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL, UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD, SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

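/*
 * The comparisons above use tcg_gen_negsetcond_i64() because AdvSIMD
 * compares produce an all-ones (-1) result for true rather than 1,
 * and CMTST sets the result where (Rn & Rm) is non-zero.
 */
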
/* Handle the 3-same-operands float operations; shared by the scalar
 * and vector encodings. The caller must filter out any encodings
 * not allocated for the encoding it is dealing with.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);
        } else {
            /* Single */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* scalar single so clear high part */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
    }

    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}

/* AdvSIMD scalar three same
 *  31 30  29 28       24 23  22 21 20  16 15    11  10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size[1] and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5d: /* FACGE */
        case 0x7d: /* FACGT */
        case 0x1c: /* FCMEQ */
        case 0x5c: /* FCMGE */
        case 0x7c: /* FCMGT */
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB (vector) */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
    } else {
        /* Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
         * no side effects for all these operations.
         * OPTME: special-purpose helpers would avoid doing some
         * unnecessary work in the helper for the 8 and 16 bit cases.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, tcg_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
    }

    write_fp_dreg(s, rd, tcg_rd);
}
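/*
 * Note: the scalar result above is assembled in a 64-bit temporary and
 * written back with write_fp_dreg(), which (as elsewhere in this file)
 * also zeroes the remaining bits of the destination vector register.
 */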
/* AdvSIMD scalar three same FP16
 *  31 30  29 28       24 23 22 21 20  16 15 14 13    11 10  9  5 4  0
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
 */
static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
                                                  uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 3);
    int rm = extract32(insn, 16, 5);
    bool u = extract32(insn, 29, 1);
    bool a = extract32(insn, 23, 1);
    int fpopcode = opcode | (a << 3) | (u << 4);
    TCGv_ptr fpst;
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;

    switch (fpopcode) {
    case 0x03: /* FMULX */
    case 0x04: /* FCMEQ (reg) */
    case 0x07: /* FRECPS */
    case 0x0f: /* FRSQRTS */
    case 0x14: /* FCMGE (reg) */
    case 0x15: /* FACGE */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT (reg) */
    case 0x1d: /* FACGT */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_res = tcg_temp_new_i32();

    switch (fpopcode) {
    case 0x03: /* FMULX */
        gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x04: /* FCMEQ (reg) */
        gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x07: /* FRECPS */
        gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x0f: /* FRSQRTS */
        gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x14: /* FCMGE (reg) */
        gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x15: /* FACGE */
        gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1a: /* FABD */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
        break;
    case 0x1c: /* FCMGT (reg) */
        gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1d: /* FACGT */
        gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);
}
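/*
 * Worked example of the FP16 opcode composition used above:
 * fpopcode = opcode | (a << 3) | (U << 4). FACGE has opcode 0x5
 * with a = 0 and U = 1, giving 0x5 | 0x10 == 0x15; FABD has
 * opcode 0x2 with a = 1 and U = 1, giving 0x2 | 0x8 | 0x10 == 0x1a.
 */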
/* AdvSIMD scalar three same extra
 *  31 30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
                                                   uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i32 ele1, ele2, ele3;
    TCGv_i64 res;
    bool feature;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /* Do a single operation on the lowest element in the vector.
     * We use the standard Neon helpers and rely on 0 OP 0 == 0
     * with no side effects for all these operations.
     * OPTME: special-purpose helpers would avoid doing some
     * unnecessary work in the helper for the 16 bit cases.
     */
    ele1 = tcg_temp_new_i32();
    ele2 = tcg_temp_new_i32();
    ele3 = tcg_temp_new_i32();

    read_vec_element_i32(s, ele1, rn, 0, size);
    read_vec_element_i32(s, ele2, rm, 0, size);
    read_vec_element_i32(s, ele3, rd, 0, size);

    switch (opcode) {
    case 0x0: /* SQRDMLAH */
        if (size == 1) {
            gen_helper_neon_qrdmlah_s16(ele3, tcg_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlah_s32(ele3, tcg_env, ele1, ele2, ele3);
        }
        break;
    case 0x1: /* SQRDMLSH */
        if (size == 1) {
            gen_helper_neon_qrdmlsh_s16(ele3, tcg_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlsh_s32(ele3, tcg_env, ele1, ele2, ele3);
        }
        break;
    default:
        g_assert_not_reached();
    }

    res = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(res, ele3);
    write_fp_dreg(s, rd, res);
}
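/*
 * The SQRDMLAH/SQRDMLSH helpers above take tcg_env so that they can
 * set the cumulative saturation flag (FPSR.QC) when the result
 * saturates.
 */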
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Handle 64->64 opcodes which are shared between the scalar and
     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
     * is valid in either group and also the double-precision fp ops.
     * The caller only needs to provide tcg_rmode and tcg_fpstatus if the op
     * requires them.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode is shared with CNT and RBIT but we have earlier
         * enforced that size == 3 if and only if this is the NOT insn.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, tcg_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, tcg_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT */
        cond = TCG_COND_LT;
    do_cmop:
        /* 64 bit integer comparison against zero, result is test ? -1 : 0. */
        tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_constant_i64(0));
        break;
    case 0x8: /* CMGT, CMGE */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            tcg_gen_abs_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, tcg_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1e: /* FRINT32Z */
    case 0x5e: /* FRINT32X */
        gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1f: /* FRINT64Z */
    case 0x5f: /* FRINT64X */
        gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
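/*
 * Compare-against-zero ops. Note that FCMLT and FCMLE have no helpers
 * of their own below: they are computed by swapping the operands and
 * reusing the FCMGT/FCMGE helpers with zero as the first operand.
 */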
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_constant_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOpFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_constant_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOpFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }

        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
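/* Reciprocal and reciprocal-square-root estimate ops: FRECPE, FRECPX,
 * FRSQRTE and (for 32-bit elements only) URECPE.
 */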
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                gen_helper_recpe_u32(tcg_res, tcg_op);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
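/* For the narrowing ops below, the "2" variants (is_q set) deposit
 * their results in the upper 64 bits of Vd (destelt == 2) and leave
 * the lower half intact; the base variants write the lower half and
 * the upper half is then zeroed via clear_vec_high().
 */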
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
     * in the source becomes a size element in the destination).
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;
    int passes = scalar ? 1 : 2;

    if (scalar) {
        tcg_res[1] = tcg_constant_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, SQXTUN */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, UQXTN */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, tcg_env);
            } else {
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
                TCGv_i32 ahp = get_ahp_flag();

                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
            }
            break;
        case 0x36: /* BFCVTN, BFCVTN2 */
        {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
        }
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            /* 64 bit to 32 bit float conversion
             * with von Neumann rounding (round to odd)
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, tcg_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], tcg_env, tcg_op);
        }
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
    }
    clear_vec_high(s, is_q, rd);
}
/* Remaining saturating accumulating ops */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, tcg_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, tcg_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, tcg_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, tcg_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, tcg_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, tcg_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, tcg_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, tcg_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        clear_vec_high(s, is_q, rd);
    }
}
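/*
 * Note: in the scalar case above the whole low 64 bits of Vd are
 * zeroed first (via the constant-zero MO_64 write) so that the
 * 8/16/32-bit result ends up zero-extended in the destination.
 */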
/* AdvSIMD scalar two reg misc
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD */
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1f:
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
    } else {
        tcg_fpstatus = NULL;
        tcg_rmode = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, tcg_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0),
                                 tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0),
                                 tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
    }

    if (is_fcvt) {
        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
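/*
 * The immediate-shift encodings below use immh:immb. A worked example
 * for the right shifts: SSHR Vd.8B, Vn.8B, #3 has immh = 0b0001 (so
 * size = 0, esize = 8) and immb = 0b101, so immhb = 13 and
 * shift = 2 * 8 - 13 = 3.
 */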
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    GVecGen2iFn *gvec_fn;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }
    tcg_debug_assert(size <= 3);

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
        break;

    case 0x08: /* SRI */
        gvec_fn = gen_gvec_sri;
        break;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same size as element size produces zero. */
                tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
                                     is_q ? 16 : 8, vec_full_reg_size(s), 0);
                return;
            }
            gvec_fn = tcg_gen_gvec_shri;
        } else {
            /* Shift count the same size as element size produces all sign. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gvec_fn = tcg_gen_gvec_sari;
        }
        break;

    case 0x04: /* SRSHR / URSHR (rounding) */
        gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
        break;

    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
        break;

    default:
        g_assert_not_reached();
    }

    gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
}
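/*
 * For the left shifts the count is encoded the other way around:
 * SHL Vd.8B, Vn.8B, #3 has immh = 0b0001 and immb = 0b011, so
 * shift = immhb - esize = 11 - 8 = 3.
 */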
/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);

    /* Range of size is limited by decode: immh is a non-zero 4 bit field */
    assert(size >= 0 && size <= 3);

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (insert) {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
    } else {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
    }
}
/* USHLL/SHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize / esize;
    TCGv_i64 tcg_rn = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = tcg_temp_new_i64();
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right now and use shifts in the main loop.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}
/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize / esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size + 1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size + 1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    clear_vec_high(s, is_q, rd);
}
/* AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
    assert(immh != 0);

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA (accumulate) */
    case 0x04: /* SRSHR / URSHR (rounding) */
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
/* Generate code to do a "long" addition or subtraction, ie one done in
 * TCGv_i64 on vector lanes twice the width specified by size.
 */
static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
                          TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
{
    static NeonGenTwo64OpFn * const fns[3][2] = {
        { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
        { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
        { tcg_gen_add_i64, tcg_gen_sub_i64 },
    };
    NeonGenTwo64OpFn *genfn;
    assert(size < 3);

    genfn = fns[size][is_sub];
    genfn(tcg_res, tcg_op1, tcg_op2);
}
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate, a subtracting accumulate,
     * or no accumulate at all?
     */
    switch (opcode) {
    case 5:
    case 8:
    case 9:
        accop = 1;
        break;
    case 10:
    case 11:
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally handle it inline.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                break;
            }
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s64(tcg_passres, tcg_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], tcg_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }
        }
    } else {
        /* size 0 or 1, generally helper functions */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                break;
            }
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s32(tcg_passres, tcg_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], tcg_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
}
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
    }
}
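/* Round by adding 2^31 (half of the discarded 32 bits) before taking
 * the high half of the 64-bit input.
 */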
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
    tcg_gen_extrh_i64_i32(res, in);
}
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
    }
    clear_vec_high(s, is_q, rd);
}
/* AdvSIMD three different
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    /* Instructions in this group fall into three basic classes
     * (in each case with the operation working on each element in
     * the input vectors):
     * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
     *     128 bit input)
     * (2) wide 64 x 128 -> 128
     * (3) narrowing 128 x 128 -> 64
     * Here we do initial decode, catch unallocated cases and
     * dispatch to separate functions for each class.
     */
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
        /* 64 x 128 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
        /* 128 x 128 -> 64 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* PMULL, PMULL2 */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        switch (size) {
        case 0: /* PMULL.P8 */
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn. */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_neon_pmull_h);
            break;
        case 3: /* PMULL.P64 */
            if (!dc_isar_feature(aa64_pmull, s)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn. */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_gvec_pmull_q);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    case 9: /* SQDMLAL, SQDMLAL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
    case 13: /* SQDMULL, SQDMULL2 */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
    case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
    case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
    case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
    case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}
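/*
 * In the logic-op group below, BSL, BIT and BIF are all expressed via
 * tcg_gen_gvec_bitsel(); they differ only in which of Rd, Rn and Rm
 * acts as the select mask and which two provide the data bits.
 */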
/* Logic op (opcode == 3) subgroup of C3.6.16. */
static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    if (!fp_access_check(s)) {
        return;
    }

    switch (size + 4 * is_u) {
    case 0: /* AND */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
        return;
    case 1: /* BIC */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
        return;
    case 2: /* ORR */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
        return;
    case 3: /* ORN */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
        return;
    case 4: /* EOR */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
        return;

    case 5: /* BSL bitwise select */
        gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
        return;
    case 6: /* BIT, bitwise insert if true */
        gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
        return;
    case 7: /* BIF, bitwise insert if false */
        gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
        return;

    default:
        g_assert_not_reached();
    }
}
/* Pairwise op subgroup of C3.6.16.
 *
 * This is called directly or via the handle_3same_float for float pairwise
 * operations where the opcode and size are calculated differently.
 */
static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
                                   int size, int rn, int rm, int rd)
{
    TCGv_ptr fpst;
    int pass;

    /* Floating point operations need fpst */
    if (opcode >= 0x58) {
        fpst = fpstatus_ptr(FPST_FPCR);
    } else {
        fpst = NULL;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* These operations work on the concatenated rm:rn, with each pair of
     * adjacent elements being operated on to produce an element in the result.
     */
    if (size == 3) {
        TCGv_i64 tcg_res[2];

        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            int passreg = (pass == 0) ? rn : rm;

            read_vec_element(s, tcg_op1, passreg, 0, MO_64);
            read_vec_element(s, tcg_op2, passreg, 1, MO_64);
            tcg_res[pass] = tcg_temp_new_i64();

            switch (opcode) {
            case 0x17: /* ADDP */
                tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 0x58: /* FMAXNMP */
                gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5a: /* FADDP */
                gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5e: /* FMAXP */
                gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x78: /* FMINNMP */
                gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x7e: /* FMINP */
                gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        }
    } else {
        int maxpass = is_q ? 4 : 2;
        TCGv_i32 tcg_res[4];

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (is_q && (pass & 1)) ? 2 : 0;

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (opcode) {
            case 0x17: /* ADDP */
            {
                static NeonGenTwoOpFn * const fns[3] = {
                    gen_helper_neon_padd_u8,
                    gen_helper_neon_padd_u16,
                    tcg_gen_add_i32,
                };
                genfn = fns[size];
                break;
            }
            case 0x14: /* SMAXP, UMAXP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
                    { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
                    { tcg_gen_smax_i32, tcg_gen_umax_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x15: /* SMINP, UMINP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
                    { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
                    { tcg_gen_smin_i32, tcg_gen_umin_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            /* The FP operations are all on single floats (32 bit) */
            case 0x58: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5a: /* FADDP */
                gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5e: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x78: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x7e: /* FMINP */
                gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            /* FP ops called directly, otherwise call now */
            if (genfn) {
                genfn(tcg_res[pass], tcg_op1, tcg_op2);
            }
        }

        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
        }
        clear_vec_high(s, is_q, rd);
    }
}
/* Floating point op subgroup of C3.6.16. */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    /* For floating point ops, the U, size[1] and opcode bits
     * together indicate the operation. size[0] indicates single
     * or double.
     */
    int fpopcode = extract32(insn, 11, 5)
        | (extract32(insn, 23, 1) << 5)
        | (extract32(insn, 29, 1) << 6);
    int is_q = extract32(insn, 30, 1);
    int size = extract32(insn, 22, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    if (size == 1 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    case 0x58: /* FMAXNMP */
    case 0x5a: /* FADDP */
    case 0x5e: /* FMAXP */
    case 0x78: /* FMINNMP */
    case 0x7e: /* FMINP */
        if (size && !is_q) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                               rn, rm, rd);
        return;
    case 0x1b: /* FMULX */
    case 0x1f: /* FRECPS */
    case 0x3f: /* FRSQRTS */
    case 0x5d: /* FACGE */
    case 0x7d: /* FACGT */
    case 0x19: /* FMLA */
    case 0x39: /* FMLS */
    case 0x18: /* FMAXNM */
    case 0x1a: /* FADD */
    case 0x1c: /* FCMEQ */
    case 0x1e: /* FMAX */
    case 0x38: /* FMINNM */
    case 0x3a: /* FSUB */
    case 0x3e: /* FMIN */
    case 0x5b: /* FMUL */
    case 0x5c: /* FCMGE */
    case 0x5f: /* FDIV */
    case 0x7a: /* FABD */
    case 0x7c: /* FCMGT */
        if (!fp_access_check(s)) {
            return;
        }
        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;

    case 0x1d: /* FMLAL */
    case 0x3d: /* FMLSL */
    case 0x59: /* FMLAL2 */
    case 0x79: /* FMLSL2 */
        if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
            unallocated_encoding(s);
            return;
        }
        if (fp_access_check(s)) {
            int is_s = extract32(insn, 23, 1);
            int is_2 = extract32(insn, 29, 1);
            int data = (is_2 << 1) | is_s;
            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm), tcg_env,
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               data, gen_helper_gvec_fmlal_a64);
        }
        return;

    default:
        unallocated_encoding(s);
        return;
    }
}
/* Integer op subgroup of C3.6.16. */
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pass;
    TCGCond cond;

    switch (opcode) {
    case 0x13: /* MUL, PMUL */
        if (u && size != 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x0: /* SHADD, UHADD */
    case 0x2: /* SRHADD, URHADD */
    case 0x4: /* SHSUB, UHSUB */
    case 0xc: /* SMAX, UMAX */
    case 0xd: /* SMIN, UMIN */
    case 0xe: /* SABD, UABD */
    case 0xf: /* SABA, UABA */
    case 0x12: /* MLA, MLS */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x01: /* SQADD, UQADD */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
        }
        return;
    case 0x05: /* SQSUB, UQSUB */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
        }
        return;
    case 0x08: /* SSHL, USHL */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
        }
        return;
    case 0x0c: /* SMAX, UMAX */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
        }
        return;
    case 0x0d: /* SMIN, UMIN */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
        }
        return;
    case 0xe: /* SABD, UABD */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size);
        }
        return;
    case 0xf: /* SABA, UABA */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size);
        }
        return;
    case 0x10: /* ADD, SUB */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
        }
        return;
    case 0x13: /* MUL, PMUL */
        if (!u) { /* MUL */
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
        } else { /* PMUL */
            gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
        }
        return;
    case 0x12: /* MLA, MLS */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size);
        }
        return;
    case 0x16: /* SQDMULH, SQRDMULH */
    {
        static gen_helper_gvec_3_ptr * const fns[2][2] = {
            { gen_helper_neon_sqdmulh_h, gen_helper_neon_sqrdmulh_h },
            { gen_helper_neon_sqdmulh_s, gen_helper_neon_sqrdmulh_s },
        };
        gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
    }
        return;
    case 0x11:
        if (!u) { /* CMTST */
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
            return;
        }
        /* else CMEQ */
        cond = TCG_COND_EQ;
        goto do_gvec_cmp;
    case 0x06: /* CMGT, CMHI */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
        goto do_gvec_cmp;
    case 0x07: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
    do_gvec_cmp:
        tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
                         vec_full_reg_offset(s, rn),
                         vec_full_reg_offset(s, rm),
                         is_q ? 16 : 8, vec_full_reg_size(s));
        return;
    }

    if (size == 3) {
        assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);

            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
    } else {
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            NeonGenTwoOpEnvFn *genenvfn = NULL;

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (opcode) {
            case 0x0: /* SHADD, UHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x2: /* SRHADD, URHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x4: /* SHSUB, UHSUB */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x9: /* SQSHL, UQSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xa: /* SRSHL, URSHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xb: /* SQRSHL, UQRSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            default:
                g_assert_not_reached();
            }

            if (genenvfn) {
                genenvfn(tcg_res, tcg_env, tcg_op1, tcg_op2);
            } else {
                genfn(tcg_res, tcg_op1, tcg_op2);
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
        }
    }
    clear_vec_high(s, is_q, rd);
}
/* AdvSIMD three same
 *  31  30  29  28       24 23  22  21 20  16 15    11  10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 5);

    switch (opcode) {
    case 0x3: /* logic ops */
        disas_simd_3same_logic(s, insn);
        break;
    case 0x17: /* ADDP */
    case 0x14: /* SMAXP, UMAXP */
    case 0x15: /* SMINP, UMINP */
    {
        /* Pairwise operations */
        int is_q = extract32(insn, 30, 1);
        int u = extract32(insn, 29, 1);
        int size = extract32(insn, 22, 2);
        int rm = extract32(insn, 16, 5);
        int rn = extract32(insn, 5, 5);
        int rd = extract32(insn, 0, 5);
        if (opcode == 0x17) {
            if (u || (size == 3 && !is_q)) {
                unallocated_encoding(s);
                return;
            }
        } else {
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
        }
        handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
        break;
    }
    case 0x18 ... 0x31:
        /* floating point ops, sz[1] and U are part of opcode */
        disas_simd_3same_float(s, insn);
        break;
    default:
        disas_simd_3same_int(s, insn);
        break;
    }
}
/*
 * Advanced SIMD three same (ARMv8.2 FP16 variants)
 *
 *  31  30  29  28       24 23 22 21 20  16 15 14 13    11 10  9    5 4    0
 * +---+---+---+-----------+---+----+------+-----+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0|  Rm  | 0 0 | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+---+----+------+-----+--------+---+------+------+
 *
 * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
 * (register), FACGE, FABD, FCMGT (register) and FACGT.
 */
static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 3);
    int u = extract32(insn, 29, 1);
    int a = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    /*
     * For these floating point ops, the U, a and opcode bits
     * together indicate the operation.
     */
    int fpopcode = opcode | (a << 3) | (u << 4);
    int datasize = is_q ? 128 : 64;
    int elements = datasize / 16;
    bool pairwise;
    TCGv_ptr fpst;
    int pass;

    switch (fpopcode) {
    case 0x0: /* FMAXNM */
    case 0x1: /* FMLA */
    case 0x2: /* FADD */
    case 0x3: /* FMULX */
    case 0x4: /* FCMEQ */
    case 0x6: /* FMAX */
    case 0x7: /* FRECPS */
    case 0x8: /* FMINNM */
    case 0x9: /* FMLS */
    case 0xa: /* FSUB */
    case 0xe: /* FMIN */
    case 0xf: /* FRSQRTS */
    case 0x13: /* FMUL */
    case 0x14: /* FCMGE */
    case 0x15: /* FACGE */
    case 0x17: /* FDIV */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT */
    case 0x1d: /* FACGT */
        pairwise = false;
        break;
    case 0x10: /* FMAXNMP */
    case 0x12: /* FADDP */
    case 0x16: /* FMAXP */
    case 0x18: /* FMINNMP */
    case 0x1e: /* FMINP */
        pairwise = true;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);

    if (pairwise) {
        int maxpass = is_q ? 8 : 4;
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res[8];

        for (pass = 0; pass < maxpass; pass++) {
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (pass << 1) & (maxpass - 1);

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (fpopcode) {
            case 0x10: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x12: /* FADDP */
                gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x16: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x1e: /* FMINP */
                gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
        }
    } else {
        for (pass = 0; pass < elements; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);

            switch (fpopcode) {
            case 0x0: /* FMAXNM */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0x2: /* FADD */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3: /* FMULX */
                gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x4: /* FCMEQ */
                gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x6: /* FMAX */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7: /* FRECPS */
                gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x8: /* FMINNM */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x9: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0xa: /* FSUB */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xe: /* FMIN */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FRSQRTS */
                gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x13: /* FMUL */
                gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x14: /* FCMGE */
                gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x15: /* FACGE */
                gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x17: /* FDIV */
                gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FABD */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
                break;
            case 0x1c: /* FCMGT */
                gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1d: /* FACGT */
                gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
        }
    }

    clear_vec_high(s, is_q, rd);
}
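
/*
 * Explanatory note: in the pairwise loop above, passreg/passelt encode
 * the same concatenate-then-pair layout as the integer ADDP example:
 * with maxpass == 4 the reads are rn[0]+rn[1], rn[2]+rn[3],
 * rm[0]+rm[1], rm[2]+rm[3].
 */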
/* AdvSIMD three same extra
 *  31  30  29  28       24 23  22  21 20  16 15 14    11  10 9  5 4  0
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 | Q | U | 0 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    bool feature;
    int rot;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    case 0x02: /* SDOT (vector) */
    case 0x12: /* UDOT (vector) */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_dp, s);
        break;
    case 0x03: /* USDOT */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_i8mm, s);
        break;
    case 0x04: /* SMMLA */
    case 0x14: /* UMMLA */
    case 0x05: /* USMMLA */
        if (!is_q || size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_i8mm, s);
        break;
    case 0x18: /* FCMLA, #0 */
    case 0x19: /* FCMLA, #90 */
    case 0x1a: /* FCMLA, #180 */
    case 0x1b: /* FCMLA, #270 */
    case 0x1c: /* FCADD, #90 */
    case 0x1e: /* FCADD, #270 */
        if (size == 0
            || (size == 1 && !dc_isar_feature(aa64_fp16, s))
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_fcma, s);
        break;
    case 0x1d: /* BFMMLA */
        if (size != MO_16 || !is_q) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_bf16, s);
        break;
    case 0x1f:
        switch (size) {
        case 1: /* BFDOT */
        case 3: /* BFMLAL{B,T} */
            feature = dc_isar_feature(aa64_bf16, s);
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x0: /* SQRDMLAH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
        return;

    case 0x1: /* SQRDMLSH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
        return;

    case 0x2: /* SDOT / UDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
                         u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
        return;

    case 0x3: /* USDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
        return;

    case 0x04: /* SMMLA, UMMLA */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
                         u ? gen_helper_gvec_ummla_b
                         : gen_helper_gvec_smmla_b);
        return;
    case 0x05: /* USMMLA */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
        return;

    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
        rot = extract32(opcode, 0, 2);
        switch (size) {
        case 1:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
                              gen_helper_gvec_fcmlah);
            break;
        case 2:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlas);
            break;
        case 3:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlad);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        rot = extract32(opcode, 1, 1);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddh);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcadds);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddd);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xd: /* BFMMLA */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
        return;
    case 0xf:
        switch (size) {
        case 1: /* BFDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
                             gen_helper_gvec_bfdot);
            break;
        case 3: /* BFMLAL{B,T} */
            gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
                              gen_helper_gvec_bfmlal);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    default:
        g_assert_not_reached();
    }
}
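
/*
 * Explanatory note: for FCMLA the low two opcode bits select the
 * rotation (0/90/180/270 degrees) and for FCADD opcode bit 1 selects
 * 90 vs 270; the extract32(opcode, ...) calls above recover that
 * 'rot' value before it is passed to the gvec helpers.
 */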
static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
                                  int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are widening (so each size element
     * in the source becomes a 2*size element in the destination.
     * The only instruction like this is FCVTL.
     */
    int pass;

    if (size == 3) {
        /* 32 -> 64 bit fp conversion */
        TCGv_i64 tcg_res[2];
        int srcelt = is_q ? 2 : 0;

        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
            gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, tcg_env);
        }
        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        }
    } else {
        /* 16 -> 32 bit fp conversion */
        int srcelt = is_q ? 4 : 0;
        TCGv_i32 tcg_res[4];
        TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 ahp = get_ahp_flag();

        for (pass = 0; pass < 4; pass++) {
            tcg_res[pass] = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
            gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
                                           fpst, ahp);
        }
        for (pass = 0; pass < 4; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
        }
    }
}
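
/*
 * Explanatory note: FCVTL reads the low half of the source vector and
 * FCVTL2 the high half; the "2" form is selected by the Q bit, which
 * is why srcelt above is is_q ? N : 0.
 */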
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz;
    int dsize = is_q ? 128 : 64;
    int i;

    if (opsz >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Special case bytes, use bswap op on each group of elements */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        int revmask = (1 << grp_size) - 1;
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd[2];

        for (i = 0; i < 2; i++) {
            tcg_rd[i] = tcg_temp_new_i64();
            tcg_gen_movi_i64(tcg_rd[i], 0);
        }

        for (i = 0; i < elements; i++) {
            int e_rev = (i & 0xf) ^ revmask;
            int w = (e_rev * esize) / 64;
            int o = (e_rev * esize) % 64;

            read_vec_element(s, tcg_rn, rn, i, size);
            tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize);
        }

        for (i = 0; i < 2; i++) {
            write_vec_element(s, tcg_rd[i], rd, i, MO_64);
        }
        clear_vec_high(s, true, rd);
    }
}
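
/*
 * Worked example for the deposit-based path above: REV64 on .4h
 * elements gives grp_size == 2, so revmask == 3 and element i lands in
 * slot i ^ 3, i.e. h0..h3 are written back as h3..h0 within each
 * 64-bit group.
 */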
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /* Implement the pairwise operations from 2-misc:
     * SADDLP, UADDLP, SADALP, UADALP.
     * These all add pairs of elements in the input to produce a
     * double-width result element in the output (possibly accumulating).
     */
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 op */
        MemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }
        }
    } else {
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOne64OpFn *genfn;
            static NeonGenOne64OpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
        }
    }
    if (!is_q) {
        tcg_res[1] = tcg_constant_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
    }
}
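
/*
 * Worked example: "SADDLP Vd.4S, Vn.8H" adds adjacent signed halfword
 * pairs into word results; SADALP is identical except that the 'accum'
 * path above also adds in the previous contents of Vd.
 */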
static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
{
    /* Implement SHLL and SHLL2 */
    int pass;
    int part = is_q ? 2 : 0;
    TCGv_i64 tcg_res[2];

    for (pass = 0; pass < 2; pass++) {
        static NeonGenWidenFn * const widenfns[3] = {
            gen_helper_neon_widen_u8,
            gen_helper_neon_widen_u16,
            tcg_gen_extu_i32_i64,
        };
        NeonGenWidenFn *widenfn = widenfns[size];
        TCGv_i32 tcg_op = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
        tcg_res[pass] = tcg_temp_new_i64();
        widenfn(tcg_res[pass], tcg_op);
        tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
    }
}
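
/*
 * Explanatory note: SHLL widens each element and then shifts left by
 * the element size, always 8 << size; e.g. "SHLL Vd.8H, Vn.8B, #8"
 * zero-extends bytes to halfwords and shifts each by 8.
 */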
/* AdvSIMD two reg misc
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool need_fpstatus = false;
    int rmode = -1;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT */
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
        return;
    case 0x4: /* CLS, CLZ */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SADDLP, UADDLP */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_shll(s, is_q, size, rn, rd);
        return;
    case 0xa: /* CMLT */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x3: /* SUQADD, USQADD */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
        return;
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc ... 0xf:
    case 0x16 ... 0x1f:
    {
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        int is_double = extract32(size, 0, 1);
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = is_double ? 3 : 2;
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d) ? true : false;
            int elements = is_double ? 2 : is_q ? 4 : 2;
            if (is_double && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
            return;
        }
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x7f: /* FSQRT */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            need_fpstatus = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x5c: /* FCVTAU */
        case 0x1c: /* FCVTAS */
            need_fpstatus = true;
            rmode = FPROUNDING_TIEAWAY;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x3c: /* URECPE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x3d: /* FRECPE */
        case 0x7d: /* FRSQRTE */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x16: /* FCVTN, FCVTN2 */
            /* handle_2misc_narrow does a 2*size -> size operation, but these
             * instructions encode the source size rather than dest size.
             */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x36: /* BFCVTN, BFCVTN2 */
            if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
            return;
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            /* fall through */
        case 0x59: /* FRINTX */
        case 0x79: /* FRINTI */
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x58: /* FRINTA */
            rmode = FPROUNDING_TIEAWAY;
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x7c: /* URSQRTE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1e: /* FRINT32Z */
        case 0x1f: /* FRINT64Z */
            rmode = FPROUNDING_ZERO;
            /* fall through */
        case 0x5e: /* FRINT32X */
        case 0x5f: /* FRINT64X */
            need_fpstatus = true;
            if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
                unallocated_encoding(s);
                return;
            }
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_fpstatus || rmode >= 0) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
    } else {
        tcg_fpstatus = NULL;
    }
    if (rmode >= 0) {
        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
    }

    switch (opcode) {
    case 0x5:
        if (u && size == 0) { /* NOT */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
            return;
        }
        break;
    case 0x8: /* CMGT, CMGE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
        }
        return;
    case 0x9: /* CMEQ, CMLE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
        }
        return;
    case 0xa: /* CMLT */
        gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
        return;
    case 0xb:
        if (u) { /* ABS, NEG */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
        }
        return;
    }

    if (size == 3) {
        /* All 64-bit element operations can be shared with scalar 2misc */
        int pass;

        /* Coverity claims (size == 3 && !is_q) has been eliminated
         * from all paths leading to here.
         */
        tcg_debug_assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
                            tcg_rmode, tcg_fpstatus);

            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
    } else {
        int pass;

        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0x4: /* CLS */
                    if (u) {
                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
                    } else {
                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                    if (u) {
                        gen_helper_neon_qneg_s32(tcg_res, tcg_env, tcg_op);
                    } else {
                        gen_helper_neon_qabs_s32(tcg_res, tcg_env, tcg_op);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_helper_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_helper_vfp_negs(tcg_res, tcg_op);
                    break;
                case 0x7f: /* FSQRT */
                    gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env);
                    break;
                case 0x1a: /* FCVTNS */
                case 0x1b: /* FCVTMS */
                case 0x1c: /* FCVTAS */
                case 0x3a: /* FCVTPS */
                case 0x3b: /* FCVTZS */
                    gen_helper_vfp_tosls(tcg_res, tcg_op,
                                         tcg_constant_i32(0), tcg_fpstatus);
                    break;
                case 0x5a: /* FCVTNU */
                case 0x5b: /* FCVTMU */
                case 0x5c: /* FCVTAU */
                case 0x7a: /* FCVTPU */
                case 0x7b: /* FCVTZU */
                    gen_helper_vfp_touls(tcg_res, tcg_op,
                                         tcg_constant_i32(0), tcg_fpstatus);
                    break;
                case 0x18: /* FRINTN */
                case 0x19: /* FRINTM */
                case 0x38: /* FRINTP */
                case 0x39: /* FRINTZ */
                case 0x58: /* FRINTA */
                case 0x79: /* FRINTI */
                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x59: /* FRINTX */
                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x7c: /* URSQRTE */
                    gen_helper_rsqrte_u32(tcg_res, tcg_op);
                    break;
                case 0x1e: /* FRINT32Z */
                case 0x5e: /* FRINT32X */
                    gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x1f: /* FRINT64Z */
                case 0x5f: /* FRINT64X */
                    gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* For these two insns size is part of the opcode specifier
                     * (handled earlier); they always operate on byte elements.
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                {
                    NeonGenOneOpEnvFn *genfn;
                    static NeonGenOneOpEnvFn * const fns[2][2] = {
                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                    };
                    genfn = fns[size][u];
                    genfn(tcg_res, tcg_env, tcg_op);
                    break;
                }
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
        }
    }
    clear_vec_high(s, is_q, rd);

    if (tcg_rmode) {
        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
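
/*
 * Explanatory note: for the FCVT* family the rounding mode is encoded
 * directly in the opcode: rmode = opcode[5] | (opcode[0] << 1) maps
 * the N/M/P/Z variants to FPROUNDING_TIEEVEN/NEGINF/POSINF/ZERO, e.g.
 * FCVTMS (0x1b) yields 2 == FPROUNDING_NEGINF.
 */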
/* AdvSIMD [scalar] two register miscellaneous (FP16)
 *
 *   31  30  29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 *   mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
 *   val:  0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
 *
 * This actually covers two groups where scalar access is governed by
 * bit 28. A bunch of the instructions (float to integral) only exist
 * in the vector form and are un-allocated for the scalar decode. Also
 * in the scalar decode Q is always 1.
 */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
{
    int fpop, opcode, a, u;
    int rn, rd;
    bool is_q;
    bool is_scalar;
    bool only_in_vector = false;

    int pass;
    TCGv_i32 tcg_rmode = NULL;
    TCGv_ptr tcg_fpstatus = NULL;
    bool need_fpst = true;
    int rmode = -1;

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    a = extract32(insn, 23, 1);
    u = extract32(insn, 29, 1);
    is_scalar = extract32(insn, 28, 1);
    is_q = extract32(insn, 30, 1);

    opcode = extract32(insn, 12, 5);
    fpop = deposit32(opcode, 5, 1, a);
    fpop = deposit32(fpop, 6, 1, u);

    switch (fpop) {
    case 0x1d: /* SCVTF */
    case 0x5d: /* UCVTF */
    {
        int elements;

        if (is_scalar) {
            elements = 1;
        } else {
            elements = (is_q ? 8 : 4);
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
        return;
    }
    case 0x2c: /* FCMGT (zero) */
    case 0x2d: /* FCMEQ (zero) */
    case 0x2e: /* FCMLT (zero) */
    case 0x6c: /* FCMGE (zero) */
    case 0x6d: /* FCMLE (zero) */
        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
        return;
    case 0x3d: /* FRECPE */
    case 0x3f: /* FRECPX */
        break;
    case 0x18: /* FRINTN */
        only_in_vector = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x19: /* FRINTM */
        only_in_vector = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x38: /* FRINTP */
        only_in_vector = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x39: /* FRINTZ */
        only_in_vector = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x58: /* FRINTA */
        only_in_vector = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x59: /* FRINTX */
    case 0x79: /* FRINTI */
        only_in_vector = true;
        /* current rounding mode */
        break;
    case 0x1a: /* FCVTNS */
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x1b: /* FCVTMS */
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x1c: /* FCVTAS */
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x3a: /* FCVTPS */
        rmode = FPROUNDING_POSINF;
        break;
    case 0x3b: /* FCVTZS */
        rmode = FPROUNDING_ZERO;
        break;
    case 0x5a: /* FCVTNU */
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x5b: /* FCVTMU */
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x5c: /* FCVTAU */
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x7a: /* FCVTPU */
        rmode = FPROUNDING_POSINF;
        break;
    case 0x7b: /* FCVTZU */
        rmode = FPROUNDING_ZERO;
        break;
    case 0x2f: /* FABS */
    case 0x6f: /* FNEG */
        need_fpst = false;
        break;
    case 0x7d: /* FRSQRTE */
    case 0x7f: /* FSQRT (vector) */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    /* Check additional constraints for the scalar encoding */
    if (is_scalar) {
        if (!is_q) {
            unallocated_encoding(s);
            return;
        }
        /* FRINTxx is only in the vector form */
        if (only_in_vector) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rmode >= 0 || need_fpst) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
    }

    if (rmode >= 0) {
        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
    }

    if (is_scalar) {
        TCGv_i32 tcg_op = read_fp_hreg(s, rn);
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);
    } else {
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
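
/*
 * Explanatory note: in the scalar path above the result is masked to
 * 16 bits and written with write_fp_sreg(), which zeroes the rest of
 * the vector register as the architecture requires for scalar writes.
 */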
/* AdvSIMD scalar x indexed element
 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * AdvSIMD vector x indexed element
 *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 */
static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
    /* This encoding has two kinds of instruction:
     *  normal, where we perform elt x idxelt => elt for each
     *     element in the vector
     *  long, where we perform elt x idxelt and generate a result of
     *     double the width of the input element
     * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
     */
    bool is_scalar = extract32(insn, 28, 1);
    bool is_q = extract32(insn, 30, 1);
    bool u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int l = extract32(insn, 21, 1);
    int m = extract32(insn, 20, 1);
    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
    int rm = extract32(insn, 16, 4);
    int opcode = extract32(insn, 12, 4);
    int h = extract32(insn, 11, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool is_long = false;
    int is_fp = 0;
    bool is_fp16 = false;
    int index;
    TCGv_ptr fpst;

    switch (16 * u + opcode) {
    case 0x08: /* MUL */
    case 0x10: /* MLA */
    case 0x14: /* MLS */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x02: /* SMLAL, SMLAL2 */
    case 0x12: /* UMLAL, UMLAL2 */
    case 0x06: /* SMLSL, SMLSL2 */
    case 0x16: /* UMLSL, UMLSL2 */
    case 0x0a: /* SMULL, SMULL2 */
    case 0x1a: /* UMULL, UMULL2 */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        is_long = true;
        break;
    case 0x03: /* SQDMLAL, SQDMLAL2 */
    case 0x07: /* SQDMLSL, SQDMLSL2 */
    case 0x0b: /* SQDMULL, SQDMULL2 */
        is_long = true;
        break;
    case 0x0c: /* SQDMULH */
    case 0x0d: /* SQRDMULH */
        break;
    case 0x01: /* FMLA */
    case 0x05: /* FMLS */
    case 0x09: /* FMUL */
    case 0x19: /* FMULX */
        is_fp = 1;
        break;
    case 0x1d: /* SQRDMLAH */
    case 0x1f: /* SQRDMLSH */
        if (!dc_isar_feature(aa64_rdm, s)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x0e: /* SDOT */
    case 0x1e: /* UDOT */
        if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x0f:
        switch (size) {
        case 0: /* SUDOT */
        case 2: /* USDOT */
            if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
                unallocated_encoding(s);
                return;
            }
            size = MO_32;
            break;
        case 1: /* BFDOT */
            if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
                unallocated_encoding(s);
                return;
            }
            size = MO_32;
            break;
        case 3: /* BFMLAL{B,T} */
            if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
                unallocated_encoding(s);
                return;
            }
            /* can't set is_fp without other incorrect size checks */
            size = MO_16;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
            unallocated_encoding(s);
            return;
        }
        is_fp = 2;
        break;
    case 0x00: /* FMLAL */
    case 0x04: /* FMLSL */
    case 0x18: /* FMLAL2 */
    case 0x1c: /* FMLSL2 */
        if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
            unallocated_encoding(s);
            return;
        }
        size = MO_16;
        /* is_fp, but we pass tcg_env not fp_status. */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    switch (is_fp) {
    case 1: /* normal fp */
        /* convert insn encoded size to MemOp size */
        switch (size) {
        case 0: /* half-precision */
            size = MO_16;
            is_fp16 = true;
            break;
        case MO_32: /* single precision */
        case MO_64: /* double precision */
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    case 2: /* complex fp */
        /* Each indexable element is a complex pair. */
        size += 1;
        switch (size) {
        case MO_32:
            if (h && !is_q) {
                unallocated_encoding(s);
                return;
            }
            is_fp16 = true;
            break;
        case MO_64:
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    default: /* integer */
        switch (size) {
        case MO_8:
        case MO_64:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    /* Given MemOp size, adjust register and indexing. */
    switch (size) {
    case MO_16:
        index = h << 2 | l << 1 | m;
        break;
    case MO_32:
        index = h << 1 | l;
        rm |= m << 4;
        break;
    case MO_64:
        if (l || !is_q) {
            unallocated_encoding(s);
            return;
        }
        index = h;
        rm |= m << 4;
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fp) {
        fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    } else {
        fpst = NULL;
    }

    switch (16 * u + opcode) {
    case 0x0e: /* SDOT */
    case 0x1e: /* UDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                         u ? gen_helper_gvec_udot_idx_b
                         : gen_helper_gvec_sdot_idx_b);
        return;
    case 0x0f:
        switch (extract32(insn, 22, 2)) {
        case 0: /* SUDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                             gen_helper_gvec_sudot_idx_b);
            return;
        case 1: /* BFDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                             gen_helper_gvec_bfdot_idx);
            return;
        case 2: /* USDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                             gen_helper_gvec_usdot_idx_b);
            return;
        case 3: /* BFMLAL{B,T} */
            gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
                              gen_helper_gvec_bfmlal_idx);
            return;
        }
        g_assert_not_reached();
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        {
            int rot = extract32(insn, 13, 2);
            int data = (index << 2) | rot;
            tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               vec_full_reg_offset(s, rd), fpst,
                               is_q ? 16 : 8, vec_full_reg_size(s), data,
                               size == MO_64
                               ? gen_helper_gvec_fcmlas_idx
                               : gen_helper_gvec_fcmlah_idx);
        }
        return;

    case 0x00: /* FMLAL */
    case 0x04: /* FMLSL */
    case 0x18: /* FMLAL2 */
    case 0x1c: /* FMLSL2 */
        {
            int is_s = extract32(opcode, 2, 1);
            int is_2 = u;
            int data = (index << 2) | (is_2 << 1) | is_s;
            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm), tcg_env,
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               data, gen_helper_gvec_fmlal_idx_a64);
        }
        return;

    case 0x08: /* MUL */
        if (!is_long && !is_scalar) {
            static gen_helper_gvec_3 * const fns[3] = {
                gen_helper_gvec_mul_idx_h,
                gen_helper_gvec_mul_idx_s,
                gen_helper_gvec_mul_idx_d,
            };
            tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               index, fns[size - 1]);
            return;
        }
        break;

    case 0x10: /* MLA */
        if (!is_long && !is_scalar) {
            static gen_helper_gvec_4 * const fns[3] = {
                gen_helper_gvec_mla_idx_h,
                gen_helper_gvec_mla_idx_s,
                gen_helper_gvec_mla_idx_d,
            };
            tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               vec_full_reg_offset(s, rd),
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               index, fns[size - 1]);
            return;
        }
        break;

    case 0x14: /* MLS */
        if (!is_long && !is_scalar) {
            static gen_helper_gvec_4 * const fns[3] = {
                gen_helper_gvec_mls_idx_h,
                gen_helper_gvec_mls_idx_s,
                gen_helper_gvec_mls_idx_d,
            };
            tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               vec_full_reg_offset(s, rd),
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               index, fns[size - 1]);
            return;
        }
        break;
    }

    if (size == 3) {
        TCGv_i64 tcg_idx = tcg_temp_new_i64();
        int pass;

        assert(is_fp && is_q && !is_long);

        read_vec_element(s, tcg_idx, rm, index, MO_64);

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            switch (16 * u + opcode) {
            case 0x05: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op, tcg_op);
                /* fall through */
            case 0x01: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
                break;
            case 0x09: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            case 0x19: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }

        clear_vec_high(s, !is_scalar, rd);
    } else if (!is_long) {
        /* 32 bit floating point, or 16 or 32 bit integer.
         * For the 16 bit scalar case we use the usual Neon helpers and
         * rely on the fact that 0 op 0 == 0 with no side effects.
         */
        TCGv_i32 tcg_idx = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        read_vec_element_i32(s, tcg_idx, rm, index, size);

        if (size == 1 && !is_scalar) {
            /* The simplest way to handle the 16x16 indexed ops is to duplicate
             * the index into both halves of the 32 bit tcg_idx and then use
             * the usual Neon helpers.
             */
            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
        }

        for (pass = 0; pass < maxpasses; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);

            switch (16 * u + opcode) {
            case 0x08: /* MUL */
            case 0x10: /* MLA */
            case 0x14: /* MLS */
            {
                static NeonGenTwoOpFn * const fns[2][2] = {
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                NeonGenTwoOpFn *genfn;
                bool is_sub = opcode == 0x4;

                if (size == 1) {
                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
                } else {
                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
                }
                if (opcode == 0x8) {
                    break;
                }
                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
                genfn = fns[size - 1][is_sub];
                genfn(tcg_res, tcg_op, tcg_res);
                break;
            }
            case 0x05: /* FMLS */
            case 0x01: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                switch (size) {
                case 1:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for fused
                         * multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
                    }
                    if (is_scalar) {
                        gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
                                                   tcg_res, fpst);
                    } else {
                        gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
                                                    tcg_res, fpst);
                    }
                    break;
                case 2:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for
                         * fused multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
                    }
                    gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
                                           tcg_res, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x09: /* FMUL */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulh(tcg_res, tcg_op,
                                                tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mul2h(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x19: /* FMULX */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulxh(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
                                                  tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x0c: /* SQDMULH */
                if (size == 1) {
                    gen_helper_neon_qdmulh_s16(tcg_res, tcg_env,
                                               tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qdmulh_s32(tcg_res, tcg_env,
                                               tcg_op, tcg_idx);
                }
                break;
            case 0x0d: /* SQRDMULH */
                if (size == 1) {
                    gen_helper_neon_qrdmulh_s16(tcg_res, tcg_env,
                                                tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qrdmulh_s32(tcg_res, tcg_env,
                                                tcg_op, tcg_idx);
                }
                break;
            case 0x1d: /* SQRDMLAH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlah_s16(tcg_res, tcg_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlah_s32(tcg_res, tcg_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            case 0x1f: /* SQRDMLSH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlsh_s16(tcg_res, tcg_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlsh_s32(tcg_res, tcg_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }

        clear_vec_high(s, is_q, rd);
    } else {
        /* long ops: 16x16->32 or 32x32->64 */
        TCGv_i64 tcg_res[2];
        int pass;
        bool satop = extract32(opcode, 0, 1);
        MemOp memop = MO_32;

        if (satop || !u) {
            memop |= MO_SIGN;
        }

        if (size == 2) {
            TCGv_i64 tcg_idx = tcg_temp_new_i64();

            read_vec_element(s, tcg_idx, rm, index, memop);

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i64 tcg_op = tcg_temp_new_i64();
                TCGv_i64 tcg_passres;
                int passelt;

                if (is_scalar) {
                    passelt = 0;
                } else {
                    passelt = pass + (is_q * 2);
                }

                read_vec_element(s, tcg_op, rn, passelt, memop);

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);

                if (satop) {
                    /* saturating, doubling */
                    gen_helper_neon_addl_saturate_s64(tcg_passres, tcg_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], tcg_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            clear_vec_high(s, !is_scalar, rd);
        } else {
            TCGv_i32 tcg_idx = tcg_temp_new_i32();

            assert(size == 1);
            read_vec_element_i32(s, tcg_idx, rm, index, size);

            if (!is_scalar) {
                /* The simplest way to handle the 16x16 indexed ops is to
                 * duplicate the index into both halves of the 32 bit tcg_idx
                 * and then use the usual Neon helpers.
                 */
                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
            }

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i32 tcg_op = tcg_temp_new_i32();
                TCGv_i64 tcg_passres;

                if (is_scalar) {
                    read_vec_element_i32(s, tcg_op, rn, pass, size);
                } else {
                    read_vec_element_i32(s, tcg_op, rn,
                                         pass + (is_q * 2), MO_32);
                }

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                if (memop & MO_SIGN) {
                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
                }
                if (satop) {
                    gen_helper_neon_addl_saturate_s32(tcg_passres, tcg_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], tcg_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
            }
        }

        if (is_scalar) {
            tcg_res[1] = tcg_constant_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        }
    }
}
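
/*
 * Explanatory note: in the long-op path, passelt = pass + (is_q * 2)
 * is how e.g. SMULL vs SMULL2 select the low or high half of the
 * source: the "2" forms (Q == 1) read the upper elements.
 */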
/* Crypto AES
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_aes(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    gen_helper_gvec_2 *genfn2 = NULL;
    gen_helper_gvec_3 *genfn3 = NULL;

    if (!dc_isar_feature(aa64_aes, s) || size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x4: /* AESE */
        genfn3 = gen_helper_crypto_aese;
        break;
    case 0x6: /* AESMC */
        genfn2 = gen_helper_crypto_aesmc;
        break;
    case 0x5: /* AESD */
        genfn3 = gen_helper_crypto_aesd;
        break;
    case 0x7: /* AESIMC */
        genfn2 = gen_helper_crypto_aesimc;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
    if (genfn2) {
        gen_gvec_op2_ool(s, true, rd, rn, 0, genfn2);
    } else {
        gen_gvec_op3_ool(s, true, rd, rd, rn, 0, genfn3);
    }
}
/* Crypto three-reg SHA
 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
 * +-----------------+------+---+------+---+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
 * +-----------------+------+---+------+---+--------+-----+------+------+
 */
static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 3);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    gen_helper_gvec_3 *genfn;
    bool feature;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1C */
        genfn = gen_helper_crypto_sha1c;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 1: /* SHA1P */
        genfn = gen_helper_crypto_sha1p;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 2: /* SHA1M */
        genfn = gen_helper_crypto_sha1m;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 3: /* SHA1SU0 */
        genfn = gen_helper_crypto_sha1su0;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 4: /* SHA256H */
        genfn = gen_helper_crypto_sha256h;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 5: /* SHA256H2 */
        genfn = gen_helper_crypto_sha256h2;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 6: /* SHA256SU1 */
        genfn = gen_helper_crypto_sha256su1;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
    gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
}
/* Crypto two-reg SHA
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    gen_helper_gvec_2 *genfn;
    bool feature;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1H */
        feature = dc_isar_feature(aa64_sha1, s);
        genfn = gen_helper_crypto_sha1h;
        break;
    case 1: /* SHA1SU1 */
        feature = dc_isar_feature(aa64_sha1, s);
        genfn = gen_helper_crypto_sha1su1;
        break;
    case 2: /* SHA256SU0 */
        feature = dc_isar_feature(aa64_sha256, s);
        genfn = gen_helper_crypto_sha256su0;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
    gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
}
static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
{
    tcg_gen_rotli_i64(d, m, 1);
    tcg_gen_xor_i64(d, d, n);
}

static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
{
    tcg_gen_rotli_vec(vece, d, m, 1);
    tcg_gen_xor_vec(vece, d, d, n);
}

void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3 op = {
        .fni8 = gen_rax1_i64,
        .fniv = gen_rax1_vec,
        .opt_opc = vecop_list,
        .fno = gen_helper_crypto_rax1,
        .vece = MO_64,
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
}
/* Crypto three-reg SHA512
 *  31                   21 20  16 15 14 13 12  11 10  9    5 4    0
 * +-----------------------+------+---+---+-----+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+---+---+-----+--------+------+------+
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;
    gen_helper_gvec_3 *oolfn = NULL;
    GVecGen3Fn *gvecfn = NULL;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = dc_isar_feature(aa64_sha3, s);
            gvecfn = gen_gvec_rax1;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = dc_isar_feature(aa64_sm4, s);
            oolfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (oolfn) {
        gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
    } else {
        gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
    }
}
/* Crypto two-reg SHA512
 *  31                                     12 11  10 9    5 4    0
 * +-----------------------------------------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------------------------+--------+------+------+
 */
static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (opcode) {
    case 0: /* SHA512SU0 */
        feature = dc_isar_feature(aa64_sha512, s);
        break;
    case 1: /* SM4E */
        feature = dc_isar_feature(aa64_sm4, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0: /* SHA512SU0 */
        gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
        break;
    case 1: /* SM4E */
        gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Crypto four-register
 *  31               23 22 21 20  16 15  14  10 9    5 4    0
 * +-------------------+-----+------+---+------+------+------+
 * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
 * +-------------------+-----+------+---+------+------+------+
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = dc_isar_feature(aa64_sha3, s);
        break;
    case 2: /* SM3SS1 */
        feature = dc_isar_feature(aa64_sm3, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3 */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    } else {
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_constant_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        /* SM3SS1 */
        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);
    }
}
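
/*
 * Explanatory note: EOR3 computes rd = rn ^ rm ^ ra and BCAX computes
 * rd = rn ^ (rm & ~ra); the two differ only in the first gate, which
 * is why they share the per-pass loop above.
 */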
/* Crypto XAR
 *  31                   21 20  16 15    10 9    5 4    0
 * +-----------------------+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
 * +-----------------------+------+--------+------+------+
 */
static void disas_crypto_xar(DisasContext *s, uint32_t insn)
{
    int rm = extract32(insn, 16, 5);
    int imm6 = extract32(insn, 10, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (!dc_isar_feature(aa64_sha3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
                 vec_full_reg_offset(s, rn),
                 vec_full_reg_offset(s, rm), imm6, 16,
                 vec_full_reg_size(s));
}
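
/*
 * Explanatory note: XAR computes (Vn ^ Vm) rotated right by imm6 in
 * each 64-bit lane. The expansion is delegated to gen_gvec_xar(),
 * which is also used by the SVE2 XAR decode.
 */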

/* Crypto three-reg imm2
 *  31                   21 20  16 15  14 13 12  11  10 9    5 4    0
 * +-----------------------+------+-----+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+-----+------+--------+------+------+
 */
static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
        gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
    };
    int opcode = extract32(insn, 10, 2);
    int imm2 = extract32(insn, 12, 2);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (!dc_isar_feature(aa64_sm3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
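
    /*
     * opcode indexes the SM3TT helper table directly (TT1A, TT1B,
     * TT2A, TT2B); imm2 is passed through as the gvec "data"
     * argument, selecting which element of Vm the helper uses.
     */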
    gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
}

/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL }
};
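
/*
 * An insn matches a table entry when (insn & mask) == pattern; the
 * all-zero sentinel entry terminates the scan, so encodings that match
 * nothing come back from lookup_disas_fn() as NULL.
 */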
static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}
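
/*
 * trans_OK and trans_FAIL back the generated decode-sme-fa64 patterns:
 * OK accepts the insn as legal while streaming, FAIL flags it as
 * non-streaming-only so the access-check code can raise an SME trap.
 */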
static bool trans_OK(DisasContext *s, arg_OK *a)
{
    return true;
}

static bool trans_FAIL(DisasContext *s, arg_OK *a)
{
    s->is_nonstreaming = true;
    return true;
}

/**
 * is_guarded_page:
 * @env: The cpu environment
 * @s: The DisasContext
 *
 * Return true if the page is guarded.
 */
static bool is_guarded_page(CPUARMState *env, DisasContext *s)
{
    uint64_t addr = s->base.pc_first;
#ifdef CONFIG_USER_ONLY
    return page_get_flags(addr) & PAGE_BTI;
#else
    CPUTLBEntryFull *full;
    void *host;
    int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
    int flags;

    /*
     * We test this immediately after reading an insn, which means
     * that the TLB entry must be present and valid, and thus this
     * access will never raise an exception.
     */
    flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
                              false, &host, &full, 0);
    assert(!(flags & TLB_INVALID_MASK));

    return full->extra.arm.guarded;
#endif
}

/**
 * btype_destination_ok:
 * @insn: The instruction at the branch destination
 * @bt: SCTLR_ELx.BT
 * @btype: PSTATE.BTYPE, and is non-zero
 *
 * On a guarded page, there are a limited number of insns
 * that may be present at the branch target:
 *  - branch target identifiers,
 *  - paciasp, pacibsp,
 *  - BRK insn
 *  - HLT insn
 * Anything else causes a Branch Target Exception.
 *
 * Return true if the branch is compatible, false to raise BTITRAP.
 */
static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
{
    if ((insn & 0xfffff01fu) == 0xd503201fu) {
        /* HINT space */
        switch (extract32(insn, 5, 7)) {
        case 0b011001: /* PACIASP */
        case 0b011011: /* PACIBSP */
            /*
             * If SCTLR_ELx.BT, then PACI*SP are not compatible
             * with btype == 3.  Otherwise all btype are ok.
             */
            return !bt || btype != 3;
        case 0b100000: /* BTI */
            /* Not compatible with any btype. */
            return false;
        case 0b100010: /* BTI c */
            /* Not compatible with btype == 3 */
            return btype != 3;
        case 0b100100: /* BTI j */
            /* Not compatible with btype == 2 */
            return btype != 2;
        case 0b100110: /* BTI jc */
            /* Compatible with any btype. */
            return true;
        }
    } else {
        switch (insn & 0xffe0001fu) {
        case 0xd4200000u: /* BRK */
        case 0xd4400000u: /* HLT */
            /* Give priority to the breakpoint exception. */
            return true;
        }
    }
    return false;
}
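
/*
 * Only encoding groups not yet converted to decodetree are dispatched
 * here; aarch64_tr_translate_insn tries disas_a64, disas_sme and
 * disas_sve first and falls back to this switch.
 */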
/* C3.1 A64 instruction index by encoding */
static void disas_a64_legacy(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 4)) {
    case 0x5:
    case 0xd: /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf: /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu_env(cpu);
    ARMCPU *arm_cpu = env_archcpu(env);
    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
    int bound, core_mmu_idx;
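
    /*
     * Everything the translator needs to know about CPU state is
     * captured in the TB flags; unpack them into DisasContext fields
     * here so per-insn code never has to consult env at translate time.
     */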
    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;
    dc->pc_save = dc->base.pc_first;
    dc->aarch64 = true;
    dc->thumb = false;
    dc->sctlr_b = 0;
    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
    dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
    dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
    dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
    dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
    dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
    dc->fgt_eret = EX_TBFLAG_A64(tb_flags, FGT_ERET);
    dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
    dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
    dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
    dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
    dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
    dc->ata[0] = EX_TBFLAG_A64(tb_flags, ATA);
    dc->ata[1] = EX_TBFLAG_A64(tb_flags, ATA0);
    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
    dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
    dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
    dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
    dc->naa = EX_TBFLAG_A64(tb_flags, NAA);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;
    dc->dcz_blocksize = arm_cpu->dcz_blocksize;
    dc->gm_blocksize = arm_cpu->gm_blocksize;

#ifdef CONFIG_USER_ONLY
    /* In sve_probe_page, we assume TBI is enabled. */
    tcg_debug_assert(dc->tbid & 1);
#endif

    dc->lse2 = dc_isar_feature(aa64_lse2, dc);

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
    dc->is_ldex = false;
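
    /*
     * TARGET_PAGE_MASK is sign-extended, so -(pc | TARGET_PAGE_MASK)
     * is the number of bytes left on this page; dividing by 4 converts
     * that to whole A64 insns.
     */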
    /* Bound the number of insns to execute to those left on the page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;

    /* If architectural single step active, limit to 1. */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;
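
    /*
     * With CF_PCREL the generated code is position-independent and the
     * TB may run at several virtual addresses, so only the offset
     * within the page is recorded for restart/restore purposes.
     */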
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, 0, 0);
    dc->insn_start = tcg_last_op();
}

static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu_env(cpu);
    uint64_t pc = s->base.pc_next;
    uint32_t insn;

    /* Singlestep exceptions have the highest priority. */
    if (s->ss_active && !s->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(s->base.num_insns == 1);
        gen_swstep_exception(s, 0, 0);
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = pc + 4;
        return;
    }

    if (pc & 3) {
        /*
         * PC alignment fault.  This has priority over the instruction abort
         * that we would receive from a translation fault via arm_ldl_code.
         * This should only be possible after an indirect branch, at the
         * start of the TB.
         */
        assert(s->base.num_insns == 1);
        gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
        return;
    }

    s->pc_curr = pc;
    insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
    s->insn = insn;
    s->base.pc_next = pc + 4;

    s->fp_access_checked = false;
    s->sve_access_checked = false;

    if (s->pstate_il) {
        /*
         * Illegal execution state. This has priority over BTI
         * exceptions, but comes after instruction abort exceptions.
         */
        gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
        return;
    }

    if (dc_isar_feature(aa64_bti, s)) {
        if (s->base.num_insns == 1) {
            /*
             * At the first insn of the TB, compute s->guarded_page.
             * We delayed computing this until successfully reading
             * the first insn of the TB, above.  This (mostly) ensures
             * that the softmmu tlb entry has been populated, and the
             * page table GP bit is available.
             *
             * Note that we need to compute this even if btype == 0,
             * because this value is used for BR instructions later
             * where ENV is not available.
             */
            s->guarded_page = is_guarded_page(env, s);

            /* First insn can have btype set to non-zero. */
            tcg_debug_assert(s->btype >= 0);

            /*
             * Note that the Branch Target Exception has fairly high
             * priority -- below debugging exceptions but above most
             * everything else.  This allows us to handle this now
             * instead of waiting until the insn is otherwise decoded.
             */
            if (s->btype != 0
                && s->guarded_page
                && !btype_destination_ok(insn, s->bt, s->btype)) {
                gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype));
                return;
            }
        } else {
            /* Not the first insn: btype must be 0. */
            tcg_debug_assert(s->btype == 0);
        }
    }
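
    /*
     * With the non-streaming trap enabled, run the FA64 decoder first:
     * insns matching a FAIL pattern set s->is_nonstreaming, which the
     * FP/SVE access checks turn into an SME trap.
     */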
    s->is_nonstreaming = false;
    if (s->sme_trap_nonstreaming) {
        disas_sme_fa64(s, insn);
    }

    if (!disas_a64(s, insn) &&
        !disas_sme(s, insn) &&
        !disas_sve(s, insn)) {
        disas_a64_legacy(s, insn);
    }

    /*
     * After execution of most insns, btype is reset to 0.
     * Note that we set btype == -1 when the insn sets btype.
     */
    if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
        reset_btype(s);
    }
}

static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ss_active)) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            gen_step_complete_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, 4);
            break;
        default:
        case DISAS_UPDATE_EXIT:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_update_pc(dc, 4);
            gen_helper_wfe(tcg_env);
            break;
        case DISAS_YIELD:
            gen_a64_update_pc(dc, 4);
            gen_helper_yield(tcg_env);
            break;
        case DISAS_WFI:
            /*
             * This is a special case because we don't want to just halt
             * the CPU if trying to debug across a WFI.
             */
            gen_a64_update_pc(dc, 4);
            gen_helper_wfi(tcg_env, tcg_constant_i32(4));
            /*
             * The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
    }
}

static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
    target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
}

const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};