/*
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "translate.h"
#include "translate-a64.h"
#include "disas/disas.h"
#include "semihosting/semihost.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
/*
 * Include the generated decoders.
 */

#include "decode-sme-fa64.c.inc"
#include "decode-a64.c.inc"
/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
/* initialize TCG globals. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
/*
 * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
 */
static int get_a64_user_mem_index(DisasContext *s)
{
    /*
     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
     * which is the usual mmu_idx for this cpu state.
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}
static void set_btype_raw(int val)
{
    tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
                   offsetof(CPUARMState, btype));
}

static void set_btype(DisasContext *s, int val)
{
    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */
    tcg_debug_assert(val >= 1 && val <= 3);
    set_btype_raw(val);
    s->btype = -1;
}

static void reset_btype(DisasContext *s)
{
    if (s->btype != 0) {
        set_btype_raw(0);
        s->btype = 0;
    }
}
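
/*
 * Note: s->btype is the translation-time tracking of PSTATE.BTYPE for
 * FEAT_BTI; set_btype() records -1 ("changed to an unknown nonzero
 * value") so that a later reset_btype() cannot be optimized away.
 */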
static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
    } else {
        tcg_gen_movi_i64(dest, s->pc_curr + diff);
    }
}

void gen_a64_update_pc(DisasContext *s, target_long diff)
{
    gen_pc_plus_diff(s, cpu_pc, diff);
    s->pc_save = s->pc_curr + diff;
}
/*
 * Handle Top Byte Ignore (TBI) bits.
 *
 * If address tagging is enabled via the TCR TBI bits:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 * If the appropriate TBI bit is set for the address then
 * the address is sign-extended from bit 55 into bits [63:56].
 *
 * Here we have concatenated TBI{1,0} into tbi.
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55. */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        switch (tbi) {
        case 1:
            /* tbi0 but !tbi1: only use the extension if positive */
            tcg_gen_and_i64(dst, dst, src);
            break;
        case 2:
            /* !tbi0 but tbi1: only use the extension if negative */
            tcg_gen_or_i64(dst, dst, src);
            break;
        case 3:
            /* tbi0 and tbi1: always use the extension */
            break;
        default:
            g_assert_not_reached();
        }
    }
}
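
/*
 * Worked example for the tbi == 1 case above: the sextract leaves the
 * top byte all-zero when bit 55 is clear and all-ones when it is set,
 * so AND-ing with the original address keeps the cleared tag only for
 * "positive" addresses (bit 55 == 0, governed by TBI0) and passes
 * "negative" addresses through unmodified.
 */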
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI bits,
     * then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
    s->pc_save = -1;
}

/*
 * Handle MTE and/or TBI.
 *
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode we do not have a TLB with which to implement this, so we must
 * remove the top byte now.
 *
 * Always return a fresh temporary that we can increment independently
 * of the write-back address.
 */

TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = tcg_temp_new_i64();
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}
/* Insert a zero tag into src, with the result at dst. */
static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
}
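
/*
 * The mask above clears bits [59:56], which hold the MTE allocation
 * tag; the TBI top-byte bits [63:60] are left untouched.
 */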
static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
                             MMUAccessType acc, int log2_size)
{
    gen_helper_probe_access(cpu_env, ptr,
                            tcg_constant_i32(acc),
                            tcg_constant_i32(get_mem_index(s)),
                            tcg_constant_i32(1 << log2_size));
}

/*
 * For MTE, check a single logical or atomic access. This probes a single
 * address, the exact one specified. The size and alignment of the access
 * is not relevant to MTE, per se, but watchpoints do require the size,
 * and we want to recognize those before making any other changes to state.
 */
static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
                                      bool is_write, bool tag_checked,
                                      MemOp memop, bool is_unpriv,
                                      int core_idx)
{
    if (tag_checked && s->mte_active[is_unpriv]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
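
/*
 * The MTEDESC fields packed above (MIDX, TBI, TCMA, WRITE, SIZEM1) are
 * unpacked again by the mte_check helper at run time; the descriptor
 * exists so that a single i32 constant can carry all of the
 * translation-time state the helper needs.
 */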
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, MemOp memop)
{
    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, memop,
                                 false, get_mem_index(s));
}

/*
 * For MTE, check multiple logical sequential accesses.
 */
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int size)
{
    if (tag_checked && s->mte_active[0]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}

typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /*
     * Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly. The NE/EQ comparisons are also fine with this choice.
     */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);
}
static void gen_rebuild_hflags(DisasContext *s)
{
    gen_helper_rebuild_hflags_a64(cpu_env, tcg_constant_i32(s->current_el));
}

static void gen_exception_internal(int excp)
{
    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
}

static void gen_exception_internal_insn(DisasContext *s, int excp)
{
    gen_a64_update_pc(s, 0);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    gen_a64_update_pc(s, 0);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syndrome));
    s->base.is_jmp = DISAS_NORETURN;
}
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}
static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (s->ss_active) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
{
    if (use_goto_tb(s, s->pc_curr + diff)) {
        /*
         * For pcrel, the pc must always be up-to-date on entry to
         * the linked TB, so that it can use simple additions for all
         * further adjustments. For !pcrel, the linked TB is compiled
         * to know its full virtual address, so we can delay the
         * update to pc to the unlinked path. A long chain of links
         * can thus avoid many updates to the PC.
         */
        if (tb_cflags(s->base.tb) & CF_PCREL) {
            gen_a64_update_pc(s, diff);
            tcg_gen_goto_tb(n);
        } else {
            tcg_gen_goto_tb(n);
            gen_a64_update_pc(s, diff);
        }
        tcg_gen_exit_tb(s->base.tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_update_pc(s, diff);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}
/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_movi_i64(t, 0);
        return t;
    } else {
        return cpu_X[reg];
    }
}
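
/*
 * Informal example: in "add x0, x1, xzr", register 31 in the Rm slot is
 * ZR and reads as zero via cpu_reg(), whereas "add sp, sp, #16" decodes
 * register 31 with cpu_reg_sp() below so that it names the stack pointer.
 */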
/* register access for when 31 == SP */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();

    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();

    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}
/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}
/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
    return v;
}

/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop move, with side effect of clearing the tail. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}

void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
}
/* Expand a 2-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 4-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand operation using an out-of-line helper. */
static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int data, gen_helper_gvec_2 *fn)
{
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand operation using an out-of-line helper. */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + qc + operation using an out-of-line helper. */
static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
                            int rm, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr qc_ptr = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), qc_ptr,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
}

/* Expand a 4-operand operation using an out-of-line helper. */
static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
                             int rm, int ra, int data, gen_helper_gvec_4 *fn)
{
    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/*
 * Expand a 4-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, int ra, bool is_fp16, int data,
                              gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
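
/*
 * The extr splits the 64-bit result into cpu_ZF (low half) and cpu_NF
 * (high half); OR-ing the halves makes cpu_ZF zero exactly when the
 * whole result is zero, while bit 31 of cpu_NF is already the sign bit.
 */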
/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 result, flag, tmp;
    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    tcg_gen_movi_i64(tmp, 0);
    tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    gen_set_NZ64(result);

    tcg_gen_xor_i64(flag, result, t0);
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_andc_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);

    tcg_gen_mov_i64(dest, result);
}
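
/*
 * Signed overflow above is (result ^ t0) & ~(t0 ^ t1): set when both
 * operands have the same sign but the result's sign differs; extrh
 * then moves that bit 63 down into bit 31 of cpu_VF.
 */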
static void gen_add32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_add64_CC(dest, t0, t1);
    } else {
        gen_add32_CC(dest, t0, t1);
    }
}
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 64 bit arithmetic */
    TCGv_i64 result, flag, tmp;

    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tcg_gen_sub_i64(result, t0, t1);

    gen_set_NZ64(result);

    tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    tcg_gen_xor_i64(flag, result, t0);
    tmp = tcg_temp_new_i64();
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_and_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);
    tcg_gen_mov_i64(dest, result);
}
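
/*
 * For subtraction the ARM carry flag means "no borrow", hence the GEU
 * setcond above; overflow is (result ^ t0) & (t0 ^ t1), set when the
 * operands differ in sign and the result's sign differs from t0's.
 */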
static void gen_sub32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 32 bit arithmetic */
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp;

    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_sub64_CC(dest, t0, t1);
    } else {
        gen_sub32_CC(dest, t0, t1);
    }
}

/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}
/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result = tcg_temp_new_i64();
        TCGv_i64 cf_64 = tcg_temp_new_i64();
        TCGv_i64 vf_64 = tcg_temp_new_i64();
        TCGv_i64 tmp = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_constant_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);
    } else {
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();
        TCGv_i32 zero = tcg_constant_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
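
/*
 * In both widths above, the carry-in is rippled through two add2 steps
 * so that the final carry-out accumulates in the same variable (cf_64
 * or cpu_CF) that supplied the carry-in.
 */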
/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, MemOp memop, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      false, 0,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, MemOp memop,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                             MemOp memop, bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && (memop & MO_SIGN)) {
        g_assert((memop & MO_SIZE) <= MO_32);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      (memop & MO_SIGN) != 0,
                                      extend,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      MemOp memop, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, MemOp mop)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmplo = tcg_temp_new_i64();

    tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));

    if ((mop & MO_SIZE) < MO_128) {
        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        TCGv_i64 tmphi = tcg_temp_new_i64();
        TCGv_i128 t16 = tcg_temp_new_i128();

        tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_concat_i64_i128(t16, tmplo, tmphi);

        tcg_gen_qemu_st_i128(t16, tcg_addr, get_mem_index(s), mop);
    }
}

/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, MemOp mop)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi = NULL;

    if ((mop & MO_SIZE) < MO_128) {
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        TCGv_i128 t16 = tcg_temp_new_i128();

        tcg_gen_qemu_ld_i128(t16, tcg_addr, get_mem_index(s), mop);

        tmphi = tcg_temp_new_i64();
        tcg_gen_extr_i128_i64(tmplo, tmphi, t16);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));

    if (tmphi) {
        tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
    }
    clear_vec_high(s, tmphi != NULL, destidx);
}
/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch ((unsigned)memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8 | MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16 | MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32 | MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64 | MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8 | MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16 | MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32 | MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
    write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
}
/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static bool fp_access_check_only(DisasContext *s)
{
    if (s->fp_excp_el) {
        assert(!s->fp_access_checked);
        s->fp_access_checked = true;

        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_fp_access_trap(1, 0xe, false, 0),
                              s->fp_excp_el);
        return false;
    }
    s->fp_access_checked = true;
    return true;
}

static bool fp_access_check(DisasContext *s)
{
    if (!fp_access_check_only(s)) {
        return false;
    }
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
        return false;
    }
    return true;
}

/*
 * Check that SVE access is enabled. If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 * This function corresponds to CheckSVEEnabled().
 */
bool sve_access_check(DisasContext *s)
{
    if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
        assert(dc_isar_feature(aa64_sme, s));
        if (!sme_sm_enabled_check(s)) {
            goto fail_exit;
        }
    } else if (s->sve_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_sve_access_trap(), s->sve_excp_el);
        goto fail_exit;
    }
    s->sve_access_checked = true;
    return fp_access_check(s);

 fail_exit:
    /* Assert that we only raise one exception per instruction. */
    assert(!s->sve_access_checked);
    s->sve_access_checked = true;
    return false;
}

/*
 * Check that SME access is enabled, raise an exception if not.
 * Note that this function corresponds to CheckSMEAccess and is
 * only used directly for cpregs.
 */
static bool sme_access_check(DisasContext *s)
{
    if (s->sme_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_smetrap(SME_ET_AccessTrap, false),
                              s->sme_excp_el);
        return false;
    }
    return true;
}

/* This function corresponds to CheckSMEEnabled. */
bool sme_enabled_check(DisasContext *s)
{
    /*
     * Note that unlike sve_excp_el, we have not constrained sme_excp_el
     * to be zero when fp_excp_el has priority. This is because we need
     * sme_excp_el by itself for cpregs access checks.
     */
    if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
        s->fp_access_checked = true;
        return sme_access_check(s);
    }
    return fp_access_check_only(s);
}

/* Common subroutine for CheckSMEAnd*Enabled. */
bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
{
    if (!sme_enabled_check(s)) {
        return false;
    }
    if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_NotStreaming, false));
        return false;
    }
    if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_InactiveZA, false));
        return false;
    }
    return true;
}
/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}
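
/*
 * Informal example: for "add x0, x1, w2, sxtw #2", option == 6 selects
 * SXTW (is_signed == 1, extsize == 2), so w2 is sign-extended to 64
 * bits and then shifted left by 2 before the addition.
 */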
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}
/*
 * This provides a simple table based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}
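
/*
 * Sketch of typical use (the table contents and handler name here are
 * hypothetical, for illustration only):
 *
 *     static const AArch64DecodeTable example_table[] = {
 *         { 0x0e200400, 0x9f200400, disas_example_three_reg },
 *         { 0x00000000, 0x00000000, NULL }
 *     };
 *     AArch64DecodeFn *fn = lookup_disas_fn(&example_table[0], insn);
 *     if (fn) {
 *         fn(s, insn);
 *     }
 */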
/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */

static bool trans_B(DisasContext *s, arg_i *a)
{
    reset_btype(s);
    gen_goto_tb(s, 0, a->imm);
    return true;
}

static bool trans_BL(DisasContext *s, arg_i *a)
{
    gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
    reset_btype(s);
    gen_goto_tb(s, 0, a->imm);
    return true;
}

static bool trans_CBZ(DisasContext *s, arg_cbz *a)
{
    TCGv_i64 tcg_cmp;
    DisasLabel match;

    tcg_cmp = read_cpu_reg(s, a->rt, a->sf);
    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, a->imm);
    return true;
}

static bool trans_TBZ(DisasContext *s, arg_tbz *a)
{
    TCGv_i64 tcg_cmp;
    DisasLabel match;

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, a->rt), 1ULL << a->bitpos);

    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, a->imm);
    return true;
}

static bool trans_B_cond(DisasContext *s, arg_B_cond *a)
{
    reset_btype(s);
    if (a->cond < 0x0e) {
        /* genuinely conditional branches */
        DisasLabel match = gen_disas_label(s);
        arm_gen_test_cc(a->cond, match.label);
        gen_goto_tb(s, 0, 4);
        set_disas_label(s, match);
        gen_goto_tb(s, 1, a->imm);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, a->imm);
    }
    return true;
}
static void set_btype_for_br(DisasContext *s, int rn)
{
    if (dc_isar_feature(aa64_bti, s)) {
        /* BR to {x16,x17} or !guard -> 1, else 3. */
        set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
    }
}

static void set_btype_for_blr(DisasContext *s)
{
    if (dc_isar_feature(aa64_bti, s)) {
        /* BLR sets BTYPE to 2, regardless of source guarded page. */
        set_btype(s, 2);
    }
}

static bool trans_BR(DisasContext *s, arg_r *a)
{
    gen_a64_set_pc(s, cpu_reg(s, a->rn));
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLR(DisasContext *s, arg_r *a)
{
    TCGv_i64 dst = cpu_reg(s, a->rn);
    TCGv_i64 lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_RET(DisasContext *s, arg_r *a)
{
    gen_a64_set_pc(s, cpu_reg(s, a->rn));
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static TCGv_i64 auth_branch_target(DisasContext *s, TCGv_i64 dst,
                                   TCGv_i64 modifier, bool use_key_a)
{
    TCGv_i64 truedst;
    /*
     * Return the branch target for a BRAA/RETA/etc, which is either
     * just the destination dst, or that value with the pauth check
     * done and the code removed from the high bits.
     */
    if (!s->pauth_active) {
        return dst;
    }

    truedst = tcg_temp_new_i64();
    if (use_key_a) {
        gen_helper_autia(truedst, cpu_env, dst, modifier);
    } else {
        gen_helper_autib(truedst, cpu_env, dst, modifier);
    }
    return truedst;
}

static bool trans_BRAZ(DisasContext *s, arg_braz *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }

    dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
    gen_a64_set_pc(s, dst);
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLRAZ(DisasContext *s, arg_braz *a)
{
    TCGv_i64 dst, lr;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }

    dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
    lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_RETA(DisasContext *s, arg_reta *a)
{
    TCGv_i64 dst;

    dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
    gen_a64_set_pc(s, dst);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BRA(DisasContext *s, arg_bra *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
    gen_a64_set_pc(s, dst);
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLRA(DisasContext *s, arg_bra *a)
{
    TCGv_i64 dst, lr;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
    lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_ERET(DisasContext *s, arg_ERET *a)
{
    TCGv_i64 dst;

    if (s->current_el == 0) {
        return false;
    }
    if (s->fgt_eret) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, 0, 2);
        return true;
    }
    dst = tcg_temp_new_i64();
    tcg_gen_ld_i64(dst, cpu_env,
                   offsetof(CPUARMState, elr_el[s->current_el]));

    translator_io_start(&s->base);

    gen_helper_exception_return(cpu_env, dst);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_ERETA(DisasContext *s, arg_reta *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }
    /* The FGT trap takes precedence over an auth trap. */
    if (s->fgt_eret) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, a->m ? 3 : 2, 2);
        return true;
    }
    dst = tcg_temp_new_i64();
    tcg_gen_ld_i64(dst, cpu_env,
                   offsetof(CPUARMState, elr_el[s->current_el]));

    dst = auth_branch_target(s, dst, cpu_X[31], !a->m);

    translator_io_start(&s->base);

    gen_helper_exception_return(cpu_env, dst);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
    return true;
}
/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0b00000: /* NOP */
        break;
    case 0b00011: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        break;
    case 0b00001: /* YIELD */
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 0b00010: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 0b00100: /* SEV */
    case 0b00101: /* SEVL */
    case 0b00110: /* DGH */
        /* we treat all as NOP at least for now */
        break;
    case 0b00111: /* XPACLRI */
        if (s->pauth_active) {
            gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
        }
        break;
    case 0b01000: /* PACIA1716 */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01010: /* PACIB1716 */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01100: /* AUTIA1716 */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01110: /* AUTIB1716 */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b10000: /* ESB */
        /* Without RAS, we must implement this as NOP. */
        if (dc_isar_feature(aa64_ras, s)) {
            /*
             * QEMU does not have a source of physical SErrors,
             * so we are only concerned with virtual SErrors.
             * The pseudocode in the ARM for this case is
             *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
             *      AArch64.vESBOperation();
             * Most of the condition can be evaluated at translation time.
             * Test for EL2 present, and defer test for SEL2 to runtime.
             */
            if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
                gen_helper_vesb(cpu_env);
            }
        }
        break;
    case 0b11000: /* PACIAZ */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
                             tcg_constant_i64(0));
        }
        break;
    case 0b11001: /* PACIASP */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11010: /* PACIBZ */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
                             tcg_constant_i64(0));
        }
        break;
    case 0b11011: /* PACIBSP */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11100: /* AUTIAZ */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
                             tcg_constant_i64(0));
        }
        break;
    case 0b11101: /* AUTIASP */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11110: /* AUTIBZ */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
                             tcg_constant_i64(0));
        }
        break;
    case 0b11111: /* AUTIBSP */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    default:
        /* default specified as NOP equivalent */
        break;
    }
}
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        reset_btype(s);
        gen_goto_tb(s, 0, 4);
        return;

    case 7: /* SB */
        if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
            goto do_unallocated;
        }
        /*
         * TODO: There is no speculation barrier opcode for TCG;
         * MB and end the TB instead.
         */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        gen_goto_tb(s, 0, 4);
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}
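
/*
 * The two helpers below implement the FEAT_FlagM2 XAFlag and AXFlag
 * conversions between the AArch64 NZCV view of an FP comparison and the
 * alternative "external" flag format. Reading off the TCG ops: XAFlag
 * computes N = !C & !Z, Z = Z & C, C = C | Z and V = !C & Z, entirely
 * branch-free from the current flag values.
 */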
static void gen_xaflag(void)
{
    TCGv_i32 z = tcg_temp_new_i32();

    tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);

    /*
     * (!C & !Z) << 31
     * (!(C | Z)) << 31
     * ~((C | Z) << 31)
     * ~-(C | Z)
     * (C | Z) - 1
     */
    tcg_gen_or_i32(cpu_NF, cpu_CF, z);
    tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);

    /* !(Z & C) */
    tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
    tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);

    /* (!C & Z) << 31 -> -(Z & ~C) */
    tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);

    /* C | Z */
    tcg_gen_or_i32(cpu_CF, cpu_CF, z);
}

static void gen_axflag(void)
{
    tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
    tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */

    /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
    tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);

    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;

    /* End the TB by default, chaining is ok. */
    s->base.is_jmp = DISAS_TOO_MANY;

    switch (op) {
    case 0x00: /* CFINV */
        if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
            goto do_unallocated;
        }
        tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x01: /* XAFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_xaflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x02: /* AXFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_axflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x03: /* UAO */
        if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_UAO);
        } else {
            clear_pstate_bits(PSTATE_UAO);
        }
        gen_rebuild_hflags(s);
        break;

    case 0x04: /* PAN */
        if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_PAN);
        } else {
            clear_pstate_bits(PSTATE_PAN);
        }
        gen_rebuild_hflags(s);
        break;

    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        gen_helper_msr_i_spsel(cpu_env, tcg_constant_i32(crm & PSTATE_SP));
        break;

    case 0x19: /* SSBS */
        if (!dc_isar_feature(aa64_ssbs, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_SSBS);
        } else {
            clear_pstate_bits(PSTATE_SSBS);
        }
        /* Don't need to rebuild hflags since SSBS is a nop */
        break;

    case 0x1a: /* DIT */
        if (!dc_isar_feature(aa64_dit, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_DIT);
        } else {
            clear_pstate_bits(PSTATE_DIT);
        }
        /* There's no need to rebuild hflags because DIT is a nop */
        break;

    case 0x1e: /* DAIFSet */
        gen_helper_msr_i_daifset(cpu_env, tcg_constant_i32(crm));
        break;

    case 0x1f: /* DAIFClear */
        gen_helper_msr_i_daifclear(cpu_env, tcg_constant_i32(crm));
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
        break;

    case 0x1c: /* TCO */
        if (dc_isar_feature(aa64_mte, s)) {
            /* Full MTE is enabled -- set the TCO bit as directed. */
            if (crm & 1) {
                set_pstate_bits(PSTATE_TCO);
            } else {
                clear_pstate_bits(PSTATE_TCO);
            }
            gen_rebuild_hflags(s);
            /* Many factors, including TCO, go into MTE_ACTIVE. */
            s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
            /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */
            s->base.is_jmp = DISAS_NEXT;
        } else {
            goto do_unallocated;
        }
        break;

    case 0x1b: /* SVCR* */
        if (!dc_isar_feature(aa64_sme, s) || crm < 2 || crm > 7) {
            goto do_unallocated;
        }
        if (sme_access_check(s)) {
            int old = s->pstate_sm | (s->pstate_za << 1);
            int new = (crm & 1) * 3;
            int msk = (crm >> 1) & 3;

            if ((old ^ new) & msk) {
                /* At least one bit changes. */
                gen_helper_set_svcr(cpu_env, tcg_constant_i32(new),
                                    tcg_constant_i32(msk));
            } else {
                s->base.is_jmp = DISAS_NEXT;
            }
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}
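
/*
 * For the SVCR case above: crm bit 0 is the value being written and crm
 * bits [2:1] select which of PSTATE.{SM,ZA} are targeted, so 'new'
 * replicates the value into both bit positions while 'msk' picks out
 * the PSTATE bits actually written.
 */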
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);
}

static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
}

static void gen_sysreg_undef(DisasContext *s, bool isread,
                             uint8_t op0, uint8_t op1, uint8_t op2,
                             uint8_t crn, uint8_t crm, uint8_t rt)
{
    /*
     * Generate code to emit an UNDEF with correct syndrome
     * information for a failed system register access.
     * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
     * but if FEAT_IDST is implemented then read accesses to registers
     * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
     * syndrome.
     */
    uint32_t syndrome;

    if (isread && dc_isar_feature(aa64_ids, s) &&
        arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
    } else {
        syndrome = syn_uncategorized();
    }
    gen_exception_insn(s, 0, EXCP_UDEF, syndrome);
}
/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                      crn, crm, op0, op1, op2);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
    bool need_exit_tb = false;
    TCGv_ptr tcg_ri = NULL;
    TCGv_i64 tcg_rt;

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    if (ri->accessfn || (ri->fgt && s->fgt_active)) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        uint32_t syndrome;

        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        gen_a64_update_pc(s, 0);
        tcg_ri = tcg_temp_new_ptr();
        gen_helper_access_check_cp_reg(tcg_ri, cpu_env,
                                       tcg_constant_i32(key),
                                       tcg_constant_i32(syndrome),
                                       tcg_constant_i32(isread));
    } else if (ri->type & ARM_CP_RAISES_EXC) {
        /*
         * The readfn or writefn might raise an exception;
         * synchronize the CPU state in case it does.
         */
        gen_a64_update_pc(s, 0);
    }

    /* Handle special cases first */
    switch (ri->type & ARM_CP_SPECIAL_MASK) {
    case 0:
        break;
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        if (s->mte_active[0]) {
            int desc = 0;

            desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
            desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
            desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);

            tcg_rt = tcg_temp_new_i64();
            gen_helper_mte_check_zva(tcg_rt, cpu_env,
                                     tcg_constant_i32(desc), cpu_reg(s, rt));
        } else {
            tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
        }
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    case ARM_CP_DC_GVA:
        {
            TCGv_i64 clean_addr, tag;

            /*
             * DC_GVA, like DC_ZVA, requires that we supply the original
             * pointer for an invalid page. Probe that address first.
             */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM. */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
            }
        }
        return;
    case ARM_CP_DC_GZVA:
        {
            TCGv_i64 clean_addr, tag;

            /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_helper_dc_zva(cpu_env, clean_addr);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM. */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
            }
        }
        return;
    default:
        g_assert_not_reached();
    }

    if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
        return;
    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
        return;
    }

    if (ri->type & ARM_CP_IO) {
        /* I/O operations must end the TB here (whether read or write) */
        need_exit_tb = translator_io_start(&s->base);
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            if (!tcg_ri) {
                tcg_ri = gen_lookup_cp_reg(key);
            }
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tcg_ri);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            if (!tcg_ri) {
                tcg_ri = gen_lookup_cp_reg(key);
            }
            gen_helper_set_cp_reg64(cpu_env, tcg_ri, tcg_rt);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /*
         * A write to any coprocessor register that ends a TB
         * must rebuild the hflags for the next TB.
         */
        gen_rebuild_hflags(s);
        /*
         * We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        need_exit_tb = true;
    }
    if (need_exit_tb) {
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
}
/* System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}
/* Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------------+------------------------+----------+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);
    uint32_t syndrome;

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception. This is architecturally
         * mandated, to ensure that single-stepping a system call
         * instruction works properly.
         */
        switch (op2_ll) {
        case 1:                                                     /* SVC */
            syndrome = syn_aa64_svc(imm16);
            if (s->fgt_svc) {
                gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
                break;
            }
            gen_ss_advance(s);
            gen_exception_insn(s, 4, EXCP_SWI, syndrome);
            break;
        case 2:                                                     /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_update_pc(s, 0);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(imm16), 2);
            break;
        case 3:                                                     /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_update_pc(s, 0);
            gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(imm16)));
            gen_ss_advance(s);
            gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT. This has two purposes.
         * Architecturally, it is an external halting debug instruction.
         * Since QEMU doesn't implement external debug, we treat this as
         * required for halting debug disabled: it will UNDEF.
         * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
         */
        if (semihosting_enabled(s->current_el == 0) && imm16 == 0xf000) {
            gen_exception_internal_insn(s, EXCP_SEMIHOST);
        } else {
            unallocated_encoding(s);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unallocated_encoding(s);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            if (extract32(insn, 22, 2) == 0) {
                disas_system(s, insn);
            } else {
                unallocated_encoding(s);
            }
        } else {
            disas_exc(s, insn);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
2358 * Load/Store exclusive instructions are implemented by remembering
2359 * the value/address loaded, and seeing if these are the same
2360 * when the store is performed. This is not actually the architecturally
2361 * mandated semantics, but it works for typical guest code sequences
2362 * and avoids having to monitor regular stores.
2364 * The store exclusive uses the atomic cmpxchg primitives to avoid
2365 * races in multi-threaded linux-user and when MTTCG softmmu is
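/*
 * Illustration (a sketch of typical guest code, not from the original
 * source): a common atomic-increment retry loop such as
 *
 *     retry:
 *         ldaxr   w0, [x1]
 *         add     w0, w0, #1
 *         stlxr   w2, w0, [x1]
 *         cbnz    w2, retry
 *
 * maps onto this scheme as follows: the LDAXR records the address and
 * loaded value in cpu_exclusive_addr/cpu_exclusive_val, and the STLXR
 * succeeds only if the cmpxchg in gen_store_exclusive() still observes
 * that value at that address.
 */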
static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn,
                               int size, bool is_pair)
{
    int idx = get_mem_index(s);
    TCGv_i64 dirty_addr, clean_addr;
    MemOp memop;

    /*
     * For pairs:
     * if size == 2, the operation is single-copy atomic for the doubleword.
     * if size == 3, the operation is single-copy atomic for *each* doubleword,
     * not the entire quadword, however it must be quadword aligned.
     */
    memop = size + is_pair;
    if (memop == MO_128) {
        memop = finalize_memop_atom(s, MO_128 | MO_ALIGN,
                                    MO_ATOM_IFALIGN_PAIR);
    } else {
        memop = finalize_memop(s, memop | MO_ALIGN);
    }

    s->is_ldex = true;
    dirty_addr = cpu_reg_sp(s, rn);
    clean_addr = gen_mte_check1(s, dirty_addr, false, rn != 31, memop);

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            TCGv_i128 t16 = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t16, clean_addr, idx, memop);

            if (s->be_data == MO_LE) {
                tcg_gen_extr_i128_i64(cpu_exclusive_val,
                                      cpu_exclusive_high, t16);
            } else {
                tcg_gen_extr_i128_i64(cpu_exclusive_high,
                                      cpu_exclusive_val, t16);
            }
            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_exclusive_addr, clean_addr);
}
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                int rn, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp, dirty_addr, clean_addr;
    MemOp memop;

    memop = (size + is_pair) | MO_ALIGN;
    memop = finalize_memop(s, memop);

    dirty_addr = cpu_reg_sp(s, rn);
    clean_addr = gen_mte_check1(s, dirty_addr, true, rn != 31, memop);

    tcg_gen_brcond_i64(TCG_COND_NE, clean_addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s), memop);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else {
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv_i128 c16 = tcg_temp_new_i128();
            TCGv_i64 a, b;

            if (s->be_data == MO_LE) {
                tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt), cpu_reg(s, rt2));
                tcg_gen_concat_i64_i128(c16, cpu_exclusive_val,
                                        cpu_exclusive_high);
            } else {
                tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt2), cpu_reg(s, rt));
                tcg_gen_concat_i64_i128(c16, cpu_exclusive_high,
                                        cpu_exclusive_val);
            }

            tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16,
                                        get_mem_index(s), memop);

            a = tcg_temp_new_i64();
            b = tcg_temp_new_i64();
            if (s->be_data == MO_LE) {
                tcg_gen_extr_i128_i64(a, b, t16);
            } else {
                tcg_gen_extr_i128_i64(b, a, t16);
            }

            tcg_gen_xor_i64(a, a, cpu_exclusive_val);
            tcg_gen_xor_i64(b, b, cpu_exclusive_high);
            tcg_gen_or_i64(tmp, a, b);

            tcg_gen_setcondi_i64(TCG_COND_NE, tmp, tmp, 0);
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s), memop);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 clean_addr;
    MemOp memop;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    memop = finalize_memop(s, size | MO_ALIGN);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt,
                               memidx, memop);
}
static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 clean_addr;
    int memidx = get_mem_index(s);
    MemOp memop;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* This is a single atomic access, despite the "pair". */
    memop = finalize_memop(s, (size + 1) | MO_ALIGN);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);

    if (size == 2) {
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx, memop);

        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
    } else {
        TCGv_i128 cmp = tcg_temp_new_i128();
        TCGv_i128 val = tcg_temp_new_i128();

        if (s->be_data == MO_LE) {
            tcg_gen_concat_i64_i128(val, t1, t2);
            tcg_gen_concat_i64_i128(cmp, s1, s2);
        } else {
            tcg_gen_concat_i64_i128(val, t2, t1);
            tcg_gen_concat_i64_i128(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx, memop);

        if (s->be_data == MO_LE) {
            tcg_gen_extr_i128_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr_i128_i64(s2, s1, cmp);
        }
    }
}
/* Update the Sixty-Four bit (SF) register size. This logic is derived
 * from the ARMv8 specs for LDR (Shared decode for all encodings).
 */
static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    int opc0 = extract32(opc, 0, 1);
    int regsize;

    if (is_signed) {
        regsize = opc0 ? 32 : 64;
    } else {
        regsize = size == 3 ? 64 : 32;
    }
    return regsize == 64;
}
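/*
 * For example: LDR w0, [x1] has size=2, so regsize is 32 and ISS.SF is 0;
 * LDR x0, [x1] has size=3, giving regsize 64 and ISS.SF 1; LDRSW x0, [x1]
 * is signed with opc<0>=0, so it also reports SF=1.
 */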
/* Load/store exclusive
 *
 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  |  Rt  |
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 *
 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   L: 0 -> store, 1 -> load
 *  o2: 0 -> exclusive, 1 -> not
 *  o1: 0 -> single register, 1 -> register pair
 *  o0: 1 -> load-acquire/store-release, 0 -> not
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int rs = extract32(insn, 16, 5);
    int is_lasr = extract32(insn, 15, 1);
    int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr;
    MemOp memop;

    switch (o2_L_o1_o0) {
    case 0x0: /* STXR */
    case 0x1: /* STLXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        gen_store_exclusive(s, rs, rt, rt2, rn, size, false);
        return;

    case 0x4: /* LDXR */
    case 0x5: /* LDAXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        gen_load_exclusive(s, rt, rt2, rn, size, false);
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        return;

    case 0x8: /* STLLR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* StoreLORelease is the same as Store-Release for QEMU. */
        /* fall through */
    case 0x9: /* STLR */
        /* Generate ISS for non-exclusive accesses including LASR. */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        /* TODO: ARMv8.4-LSE SCTLR.nAA */
        memop = finalize_memop(s, size | MO_ALIGN);
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    true, rn != 31, memop);
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, memop, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        return;

    case 0xc: /* LDLAR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
        /* fall through */
    case 0xd: /* LDAR */
        /* Generate ISS for non-exclusive accesses including LASR. */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        /* TODO: ARMv8.4-LSE SCTLR.nAA */
        memop = finalize_memop(s, size | MO_ALIGN);
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    false, rn != 31, memop);
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, memop, false, true,
                  rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;

    case 0x2: case 0x3: /* CASP / STXP */
        if (size & 2) { /* STXP / STLXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            gen_store_exclusive(s, rs, rt, rt2, rn, size, true);
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASP / CASPL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0x6: case 0x7: /* CASPA / LDXP */
        if (size & 2) { /* LDXP / LDAXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            gen_load_exclusive(s, rt, rt2, rn, size, true);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASPA / CASPAL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0xa: /* CAS */
    case 0xb: /* CASL */
    case 0xe: /* CASA */
    case 0xf: /* CASAL */
        if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
            gen_compare_and_swap(s, rs, rt, rn, size);
            return;
        }
        break;
    }
    unallocated_encoding(s);
}
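/*
 * Worked example of the o2_L_o1_o0 key above: LDAXR has o2=0 (exclusive),
 * L=1 (load), o1=0 (single register) and o0=1 (acquire), so the key is
 * 0b0101 = 0x5, matching the LDAXR case in the switch.
 */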
/*
 * Load register (literal)
 *
 *  31 30 29   27  26 25 24 23                5 4     0
 * +-----+-------+---+-----+-------------------+-------+
 * | opc | 0 1 1 | V | 0 0 |        imm19      |  Rt   |
 * +-----+-------+---+-----+-------------------+-------+
 *
 * V: 1 -> vector (simd/fp)
 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
 *                   10-> 32 bit signed, 11 -> prefetch
 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, clean_addr;
    MemOp memop;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
        if (!fp_access_check(s)) {
            return;
        }
        memop = finalize_memop_asimd(s, size);
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
        memop = finalize_memop(s, size + is_signed * MO_SIGN);
    }

    tcg_rt = cpu_reg(s, rt);

    clean_addr = tcg_temp_new_i64();
    gen_pc_plus_diff(s, clean_addr, imm);

    if (is_vector) {
        do_fp_ld(s, rt, clean_addr, memop);
    } else {
        /* Only unsigned 32bit loads target 32bit registers. */
        bool iss_sf = opc != 0;
        do_gpr_ld(s, tcg_rt, clean_addr, memop, false, true, rt, iss_sf, false);
    }
}
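/*
 * For example, a literal-pool load "LDR x0, some_label" encodes opc=01
 * and a signed 19-bit word offset; the address is always PC-relative,
 * which is why clean_addr is formed with gen_pc_plus_diff() rather than
 * from a base register.
 */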
/*
 * LDNP (Load Pair - non-temporal hint)
 * LDP (Load Pair - non vector)
 * LDPSW (Load Pair Signed Word - non vector)
 * STNP (Store Pair - non-temporal hint)
 * STP (Store Pair - non vector)
 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
 * LDP (Load Pair of SIMD&FP)
 * STNP (Store Pair of SIMD&FP - non-temporal hint)
 * STP (Store Pair of SIMD&FP)
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+-----------------------------+
 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  | Rt   |
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW/STGP               01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index
 *   L: 0 -> Store 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be stored
 * Rn = general purpose register containing address
 * imm7 = signed offset (multiple of 4 or 8 depending on size)
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;
    bool set_tag = false;

    TCGv_i64 clean_addr, dirty_addr;

    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else if (opc == 1 && !is_load) {
        /* STGP */
        if (!dc_isar_feature(aa64_mte_insn_reg, s) || index == 0) {
            unallocated_encoding(s);
            return;
        }
        size = 3;
        set_tag = true;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    offset <<= (set_tag ? LOG2_TAG_GRANULE : size);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!postindex) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    }

    if (set_tag) {
        if (!s->ata) {
            /*
             * TODO: We could rely on the stores below, at least for
             * system mode, if we arrange to add MO_ALIGN_16.
             */
            gen_helper_stg_stub(cpu_env, dirty_addr);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
        } else {
            gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
        }
    }

    clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
                                (wback || rn != 31) && !set_tag, 2 << size);

    if (is_vector) {
        MemOp mop = finalize_memop_asimd(s, size);

        if (is_load) {
            do_fp_ld(s, rt, clean_addr, mop);
        } else {
            do_fp_st(s, rt, clean_addr, mop);
        }
        tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
        if (is_load) {
            do_fp_ld(s, rt2, clean_addr, mop);
        } else {
            do_fp_st(s, rt2, clean_addr, mop);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
        MemOp mop = size + 1;

        /*
         * With LSE2, non-sign-extending pairs are treated atomically if
         * aligned, and if unaligned one of the pair will be completely
         * within a 16-byte block and that element will be atomic.
         * Otherwise each element is separately atomic.
         * In all cases, issue one operation with the correct atomicity.
         *
         * This treats sign-extending loads like zero-extending loads,
         * since that reuses the most code below.
         */
        if (s->align_mem) {
            mop |= (size == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
        }
        mop = finalize_memop_pair(s, mop);

        if (is_load) {
            if (size == 2) {
                int o2 = s->be_data == MO_LE ? 32 : 0;
                int o1 = o2 ^ 32;

                tcg_gen_qemu_ld_i64(tcg_rt, clean_addr, get_mem_index(s), mop);
                if (is_signed) {
                    tcg_gen_sextract_i64(tcg_rt2, tcg_rt, o2, 32);
                    tcg_gen_sextract_i64(tcg_rt, tcg_rt, o1, 32);
                } else {
                    tcg_gen_extract_i64(tcg_rt2, tcg_rt, o2, 32);
                    tcg_gen_extract_i64(tcg_rt, tcg_rt, o1, 32);
                }
            } else {
                TCGv_i128 tmp = tcg_temp_new_i128();

                tcg_gen_qemu_ld_i128(tmp, clean_addr, get_mem_index(s), mop);
                if (s->be_data == MO_LE) {
                    tcg_gen_extr_i128_i64(tcg_rt, tcg_rt2, tmp);
                } else {
                    tcg_gen_extr_i128_i64(tcg_rt2, tcg_rt, tmp);
                }
            }
        } else {
            if (size == 2) {
                TCGv_i64 tmp = tcg_temp_new_i64();

                if (s->be_data == MO_LE) {
                    tcg_gen_concat32_i64(tmp, tcg_rt, tcg_rt2);
                } else {
                    tcg_gen_concat32_i64(tmp, tcg_rt2, tcg_rt);
                }
                tcg_gen_qemu_st_i64(tmp, clean_addr, get_mem_index(s), mop);
            } else {
                TCGv_i128 tmp = tcg_temp_new_i128();

                if (s->be_data == MO_LE) {
                    tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2);
                } else {
                    tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt);
                }
                tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);
            }
        }
    }

    if (wback) {
        if (postindex) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
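/*
 * For example, the prologue store "STP x29, x30, [sp, #-16]!" has
 * index=3 (pre-index), so wback is set and postindex is not: the offset
 * (imm7=-2 scaled by size=3 to -16) is added to SP before the access,
 * and SP is written back afterwards.
 */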
/*
 * Load/store (immediate post-indexed)
 * Load/store (immediate pre-indexed)
 * Load/store (unscaled immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 *
 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
 *       10 -> unprivileged
 * V = 0 -> non-vector
 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
                                int opc,
                                int size,
                                int rt,
                                bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_unpriv = (idx == 2);
    bool iss_valid;
    bool post_index;
    bool writeback;
    int memidx;
    MemOp memop;
    TCGv_i64 clean_addr, dirty_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4 || is_unpriv) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
        if (!fp_access_check(s)) {
            return;
        }
        memop = finalize_memop_asimd(s, size);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            if (idx != 0) {
                unallocated_encoding(s);
                return;
            }
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = !is_store && extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
        memop = finalize_memop(s, size + is_signed * MO_SIGN);
    }

    switch (idx) {
    case 0:
    case 2:
        post_index = false;
        writeback = false;
        break;
    case 1:
        post_index = true;
        writeback = true;
        break;
    case 3:
        post_index = false;
        writeback = true;
        break;
    default:
        g_assert_not_reached();
    }

    iss_valid = !is_vector && !writeback;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!post_index) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
    }

    memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);

    clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
                                       writeback || rn != 31,
                                       size, is_unpriv, memidx);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, memop);
        } else {
            do_fp_ld(s, rt, clean_addr, memop);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st_memidx(s, tcg_rt, clean_addr, memop, memidx,
                             iss_valid, rt, iss_sf, false);
        } else {
            do_gpr_ld_memidx(s, tcg_rt, clean_addr, memop,
                             is_extended, memidx,
                             iss_valid, rt, iss_sf, false);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, dirty_addr);
    }
}
/*
 * Load/store (register offset)
 *
 * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * V: 1 -> vector/simd
 * opt: extend encoding (see DecodeRegExtend)
 * S: if S=1 then scale (essentially index by sizeof(size))
 * Rt: register to transfer into/out of
 * Rn: address register or SP for base
 * Rm: offset register or ZR for offset
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
                                   int opc,
                                   int size,
                                   int rt,
                                   bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 13, 3);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    TCGv_i64 tcg_rm, clean_addr, dirty_addr;
    MemOp memop;

    if (extract32(opt, 1, 1) == 0) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = !is_store && extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);

    memop = finalize_memop(s, size + is_signed * MO_SIGN);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, memop);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, memop);
        } else {
            do_fp_ld(s, rt, clean_addr, memop);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, memop,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, memop,
                      is_extended, true, rt, iss_sf, false);
        }
    }
}
/*
 * Load/store (unsigned immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21        10 9     5
 * +----+-------+---+-----+-----+------------+-------+------+
 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
 * +----+-------+---+-----+-----+------------+-------+------+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * Rn: base address register (inc SP)
 * Rt: target register
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
                                        int opc,
                                        int size,
                                        int rt,
                                        bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    unsigned int offset;
    TCGv_i64 clean_addr, dirty_addr;
    bool is_store;
    bool is_signed = false;
    bool is_extended = false;
    MemOp memop;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = !is_store && extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    offset = imm12 << size;
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);

    memop = finalize_memop(s, size + is_signed * MO_SIGN);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, memop);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, memop);
        } else {
            do_fp_ld(s, rt, clean_addr, memop);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, memop, true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, memop,
                      is_extended, true, rt, iss_sf, false);
        }
    }
}
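/*
 * For example, "LDR x0, [x1, #8]" encodes imm12=1 with size=3, so the
 * offset is 1 << 3 = 8: unsigned-immediate offsets are always scaled by
 * the access size, which is why imm12 is shifted left by size above.
 */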
/* Atomic memory operations
 *
 *  31  30      27  26    24    22  21   16   15    12    10    5     0
 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
 * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn |  Rt |
 * +------+-------+---+-----+-----+--------+----+-----+-----+----+-----+
 *
 * Rt: the result register
 * Rn: base address or SP
 * Rs: the source register for the operation
 * V: vector flag (always 0 as of v8.3)
 * A: acquire flag
 * R: release flag
 */
static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
                              int size, int rt, bool is_vector)
{
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
    bool r = extract32(insn, 22, 1);
    bool a = extract32(insn, 23, 1);
    TCGv_i64 tcg_rs, tcg_rt, clean_addr;
    AtomicThreeOpFn *fn = NULL;
    MemOp mop = finalize_memop(s, size | MO_ALIGN);

    if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
        unallocated_encoding(s);
        return;
    }
    switch (o3_opc) {
    case 000: /* LDADD */
        fn = tcg_gen_atomic_fetch_add_i64;
        break;
    case 001: /* LDCLR */
        fn = tcg_gen_atomic_fetch_and_i64;
        break;
    case 002: /* LDEOR */
        fn = tcg_gen_atomic_fetch_xor_i64;
        break;
    case 003: /* LDSET */
        fn = tcg_gen_atomic_fetch_or_i64;
        break;
    case 004: /* LDSMAX */
        fn = tcg_gen_atomic_fetch_smax_i64;
        mop |= MO_SIGN;
        break;
    case 005: /* LDSMIN */
        fn = tcg_gen_atomic_fetch_smin_i64;
        mop |= MO_SIGN;
        break;
    case 006: /* LDUMAX */
        fn = tcg_gen_atomic_fetch_umax_i64;
        break;
    case 007: /* LDUMIN */
        fn = tcg_gen_atomic_fetch_umin_i64;
        break;
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
    case 014: /* LDAPR, LDAPRH, LDAPRB */
        if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
            rs != 31 || a != 1 || r != 0) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, mop);

    if (o3_opc == 014) {
        /*
         * LDAPR* are a special case because they are a simple load, not a
         * fetch-and-do-something op.
         * The architectural consistency requirements here are weaker than
         * full load-acquire (we only need "load-acquire processor consistent"),
         * but we choose to implement them as full LDAQ.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop, false,
                  true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;
    }

    tcg_rs = read_cpu_reg(s, rs, true);
    tcg_rt = cpu_reg(s, rt);

    if (o3_opc == 1) { /* LDCLR */
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }

    /* The tcg atomic primitives are all full barriers.  Therefore we
     * can ignore the Acquire and Release bits of this instruction.
     */
    fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);

    if ((mop & MO_SIGN) && size != MO_64) {
        tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
    }
}
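/*
 * For example, "LDADD w1, w2, [x0]" selects o3_opc=0 and thus
 * tcg_gen_atomic_fetch_add_i64: w2 receives the old memory value while
 * w1 is added to it atomically. The LDADDA/LDADDL/LDADDAL variants all
 * take the same path here, since (per the comment above) the tcg atomic
 * primitives are full barriers anyway.
 */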
/*
 * PAC memory operations
 *
 *  31  30      27  26    24    22  21       12  11  10    5     0
 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
 * | size | 1 1 1 | V | 0 0 | M S | 1 |  imm9  | W | 1 | Rn |  Rt |
 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
 *
 * Rt: the result register
 * Rn: base address or SP
 * V: vector flag (always 0 as of v8.3)
 * M: clear for key DA, set for key DB
 * W: pre-indexing flag
 * S: sign for imm9.
 */
static void disas_ldst_pac(DisasContext *s, uint32_t insn,
                           int size, int rt, bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    bool is_wback = extract32(insn, 11, 1);
    bool use_key_a = !extract32(insn, 23, 1);
    int offset;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    MemOp memop;

    if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    if (s->pauth_active) {
        if (use_key_a) {
            gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
                             tcg_constant_i64(0));
        } else {
            gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
                             tcg_constant_i64(0));
        }
    }

    /* Form the 10-bit signed, scaled offset. */
    offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
    offset = sextract32(offset << size, 0, 10 + size);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);

    memop = finalize_memop(s, size);

    /* Note that "clean" and "dirty" here refer to TBI not PAC. */
    clean_addr = gen_mte_check1(s, dirty_addr, false,
                                is_wback || rn != 31, memop);

    tcg_rt = cpu_reg(s, rt);
    do_gpr_ld(s, tcg_rt, clean_addr, memop,
              /* extend */ false, /* iss_valid */ !is_wback,
              /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);

    if (is_wback) {
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
/*
 * LDAPR/STLR (unscaled immediate)
 *
 *  31  30            24    22  21       12    10    5     0
 * +------+-------------+-----+---+--------+-----+----+-----+
 * | size | 0 1 1 0 0 1 | opc | 0 |  imm9  | 0 0 | Rn |  Rt |
 * +------+-------------+-----+---+--------+-----+----+-----+
 *
 * Rt: source or destination register
 * Rn: base register
 * imm9: unscaled immediate offset
 * opc: 00: STLUR*, 01/10/11: various LDAPUR*
 * size: size of load/store
 */
static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int offset = sextract32(insn, 12, 9);
    int opc = extract32(insn, 22, 2);
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr, dirty_addr;
    bool is_store = false;
    bool extend = false;
    bool iss_sf;
    MemOp mop;

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        unallocated_encoding(s);
        return;
    }

    /* TODO: ARMv8.4-LSE SCTLR.nAA */
    mop = finalize_memop(s, size | MO_ALIGN);

    switch (opc) {
    case 0: /* STLURB */
        is_store = true;
        break;
    case 1: /* LDAPUR* */
        break;
    case 2: /* LDAPURS* 64-bit variant */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        break;
    case 3: /* LDAPURS* 32-bit variant */
        if (size > 1) {
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        extend = true; /* zero-extend 32->64 after signed load */
        break;
    default:
        g_assert_not_reached();
    }

    iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = clean_data_tbi(s, dirty_addr);

    if (is_store) {
        /* Store-Release semantics */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
    } else {
        /*
         * Load-AcquirePC semantics; we implement as the slightly more
         * restrictive Load-Acquire.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
                  extend, true, rt, iss_sf, true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
}
/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int opc = extract32(insn, 22, 2);
    bool is_vector = extract32(insn, 26, 1);
    int size = extract32(insn, 30, 2);

    switch (extract32(insn, 24, 2)) {
    case 0:
        if (extract32(insn, 21, 1) == 0) {
            /* Load/store register (unscaled immediate)
             * Load/store immediate pre/post-indexed
             * Load/store register unprivileged
             */
            disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
            return;
        }
        switch (extract32(insn, 10, 2)) {
        case 0:
            disas_ldst_atomic(s, insn, size, rt, is_vector);
            return;
        case 2:
            disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
            return;
        default:
            disas_ldst_pac(s, insn, size, rt, is_vector);
            return;
        }
        break;
    case 1:
        disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
        return;
    }
    unallocated_encoding(s);
}
/* AdvSIMD load/store multiple structures
 *
 *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+-------------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+-------------+--------+------+------+------+
 *
 * AdvSIMD load/store multiple structures (post-indexed)
 *
 *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian, align, mop;

    int total;    /* total bytes */
    int elements; /* elements per vector */
    int rpt;      /* num iterations */
    int selem;    /* structure elements */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    /* From the shared decode logic */
    switch (opcode) {
    case 0x0:
        rpt = 1;
        selem = 4;
        break;
    case 0x2:
        rpt = 4;
        selem = 1;
        break;
    case 0x4:
        rpt = 1;
        selem = 3;
        break;
    case 0x6:
        rpt = 3;
        selem = 1;
        break;
    case 0x7:
        rpt = 1;
        selem = 1;
        break;
    case 0x8:
        rpt = 1;
        selem = 2;
        break;
    case 0xa:
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian. */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    total = rpt * selem * (is_q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, rn);

    /*
     * Issue the MTE check vs the logical repeat count, before we
     * promote consecutive little-endian elements below.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
                                total);

    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    align = MO_ALIGN;
    if (selem == 1 && endian == MO_LE) {
        align = pow2_align(size);
        size = 3;
    }
    if (!s->align_mem) {
        align = 0;
    }
    mop = endian | size | align;

    elements = (is_q ? 16 : 8) >> size;
    tcg_ebytes = tcg_constant_i64(1 << size);
    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < selem; xs++) {
                int tt = (rt + r + xs) % 32;
                if (is_store) {
                    do_vec_st(s, tt, e, clean_addr, mop);
                } else {
                    do_vec_ld(s, tt, e, clean_addr, mop);
                }
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }

    if (!is_store) {
        /* For non-quad operations, setting a slice of the low
         * 64 bits of the register clears the high 64 bits (in
         * the ARM ARM pseudocode this is implicit in the fact
         * that 'rval' is a 64 bit wide variable).
         * For quad operations, we might still need to zero the
         * high bits of SVE.
         */
        for (r = 0; r < rpt * selem; r++) {
            int tt = (rt + r) % 32;
            clear_vec_high(s, is_q, tt);
        }
    }

    if (is_postidx) {
        if (rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
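/*
 * For example, "LD4 {v0.16b-v3.16b}, [x0]" has opcode=0, so rpt=1 and
 * selem=4: 64 bytes are transferred, de-interleaved so that element e
 * of each of v0..v3 comes from four consecutive bytes of memory.
 */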
/* AdvSIMD load/store single structure
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * AdvSIMD load/store single structure (post-indexed)
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 1 | L R |     Rm    | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 * index = encoded in Q:S:size dependent on size
 *
 * lane_size = encoded in R, opc
 * transfer width = encoded in opc, S, size
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    int index = is_q << 3 | S << 2 | size;
    int xs, total;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (extract32(insn, 31, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (scale) {
    case 3:
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        break;
    case 1:
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            index >>= 2;
        } else {
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = selem << scale;
    tcg_rn = cpu_reg_sp(s, rn);

    clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
                                total);
    mop = finalize_memop(s, scale);

    tcg_ebytes = tcg_constant_i64(1 << scale);
    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements */
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
            tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
                                 (is_q + 1) * 8, vec_full_reg_size(s),
                                 tcg_tmp);
        } else {
            /* Load/store one element per register */
            if (is_load) {
                do_vec_ld(s, rt, index, clean_addr, mop);
            } else {
                do_vec_st(s, rt, index, clean_addr, mop);
            }
        }
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
        rt = (rt + 1) % 32;
    }

    if (is_postidx) {
        if (rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
/*
 * Load/Store memory tags
 *
 *  31 30 29         24     22  21     12    10      5      0
 * +-----+-------------+-----+---+------+-----+------+------+
 * | 1 1 | 0 1 1 0 0 1 | op1 | 1 | imm9 | op2 |  Rn  |  Rt  |
 * +-----+-------------+-----+---+------+-----+------+------+
 */
static void disas_ldst_tag(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
    int op2 = extract32(insn, 10, 2);
    int op1 = extract32(insn, 22, 2);
    bool is_load = false, is_pair = false, is_zero = false, is_mult = false;
    int index = 0;
    TCGv_i64 addr, clean_addr, tcg_rt;

    /* We checked insn bits [29:24,21] in the caller. */
    if (extract32(insn, 30, 2) != 3) {
        goto do_unallocated;
    }

    /*
     * @index is a tri-state variable which has 3 states:
     * < 0 : post-index, writeback
     * = 0 : signed offset
     * > 0 : pre-index, writeback
     */
    switch (op1) {
    case 0:
        if (op2 != 0) {
            /* STG */
            index = op2 - 2;
        } else {
            /* STZGM */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = is_zero = true;
        }
        break;
    case 1:
        if (op2 != 0) {
            /* STZG */
            is_zero = true;
            index = op2 - 2;
        } else {
            /* LDG */
            is_load = true;
        }
        break;
    case 2:
        if (op2 != 0) {
            /* ST2G */
            is_pair = true;
            index = op2 - 2;
        } else {
            /* STGM */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = true;
        }
        break;
    case 3:
        if (op2 != 0) {
            /* STZ2G */
            is_pair = is_zero = true;
            index = op2 - 2;
        } else {
            /* LDGM */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = is_load = true;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    if (is_mult
        ? !dc_isar_feature(aa64_mte, s)
        : !dc_isar_feature(aa64_mte_insn_reg, s)) {
        goto do_unallocated;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, rn, true);
    if (index >= 0) {
        /* pre-index or signed offset */
        tcg_gen_addi_i64(addr, addr, offset);
    }

    if (is_mult) {
        tcg_rt = cpu_reg(s, rt);

        if (is_zero) {
            int size = 4 << s->dcz_blocksize;

            if (s->ata) {
                gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
            }
            /*
             * The non-tags portion of STZGM is mostly like DC_ZVA,
             * except the alignment happens before the access.
             */
            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_helper_dc_zva(cpu_env, clean_addr);
        } else if (s->ata) {
            if (is_load) {
                gen_helper_ldgm(tcg_rt, cpu_env, addr);
            } else {
                gen_helper_stgm(cpu_env, addr, tcg_rt);
            }
        } else {
            MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
            int size = 4 << GMID_EL1_BS;

            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_probe_access(s, clean_addr, acc, size);

            if (is_load) {
                /* The result tags are zeros. */
                tcg_gen_movi_i64(tcg_rt, 0);
            }
        }
        return;
    }

    if (is_load) {
        tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
        tcg_rt = cpu_reg(s, rt);
        if (s->ata) {
            gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
        } else {
            clean_addr = clean_data_tbi(s, addr);
            gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
            gen_address_with_allocation_tag0(tcg_rt, addr);
        }
    } else {
        tcg_rt = cpu_reg_sp(s, rt);
        if (!s->ata) {
            /*
             * For STG and ST2G, we need to check alignment and probe memory.
             * TODO: For STZG and STZ2G, we could rely on the stores below,
             * at least for system mode; user-only won't enforce alignment.
             */
            if (is_pair) {
                gen_helper_st2g_stub(cpu_env, addr);
            } else {
                gen_helper_stg_stub(cpu_env, addr);
            }
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            if (is_pair) {
                gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
            }
        } else {
            if (is_pair) {
                gen_helper_st2g(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg(cpu_env, addr, tcg_rt);
            }
        }
    }

    if (is_zero) {
        TCGv_i64 clean_addr = clean_data_tbi(s, addr);
        TCGv_i64 zero64 = tcg_constant_i64(0);
        TCGv_i128 zero128 = tcg_temp_new_i128();
        int mem_index = get_mem_index(s);
        MemOp mop = finalize_memop(s, MO_128 | MO_ALIGN);

        tcg_gen_concat_i64_i128(zero128, zero64, zero64);

        /* This is 1 or 2 atomic 16-byte operations. */
        tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
        if (is_pair) {
            tcg_gen_addi_i64(clean_addr, clean_addr, 16);
            tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
        }
    }

    if (index != 0) {
        /* pre-index or post-index */
        if (index < 0) {
            /* post-index */
            tcg_gen_addi_i64(addr, addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
    }
}
/* Loads and stores */
static void disas_ldst(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 6)) {
    case 0x08: /* Load/store exclusive */
        disas_ldst_excl(s, insn);
        break;
    case 0x18: case 0x1c: /* Load register (literal) */
        disas_ld_lit(s, insn);
        break;
    case 0x28: case 0x29:
    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
        disas_ldst_pair(s, insn);
        break;
    case 0x38: case 0x39:
    case 0x3c: case 0x3d: /* Load/store register (all forms) */
        disas_ldst_reg(s, insn);
        break;
    case 0x0c: /* AdvSIMD load/store multiple structures */
        disas_ldst_multiple_struct(s, insn);
        break;
    case 0x0d: /* AdvSIMD load/store single structure */
        disas_ldst_single_struct(s, insn);
        break;
    case 0x19:
        if (extract32(insn, 21, 1) != 0) {
            disas_ldst_tag(s, insn);
        } else if (extract32(insn, 10, 2) == 0) {
            disas_ldst_ldapr_stlr(s, insn);
        } else {
            unallocated_encoding(s);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);

static bool gen_rri(DisasContext *s, arg_rri_sf *a,
                    bool rd_sp, bool rn_sp, ArithTwoOp *fn)
{
    TCGv_i64 tcg_rn = rn_sp ? cpu_reg_sp(s, a->rn) : cpu_reg(s, a->rn);
    TCGv_i64 tcg_rd = rd_sp ? cpu_reg_sp(s, a->rd) : cpu_reg(s, a->rd);
    TCGv_i64 tcg_imm = tcg_constant_i64(a->imm);

    fn(tcg_rd, tcg_rn, tcg_imm);
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}
/*
 * PC-rel. addressing
 */

static bool trans_ADR(DisasContext *s, arg_ri *a)
{
    gen_pc_plus_diff(s, cpu_reg(s, a->rd), a->imm);
    return true;
}

static bool trans_ADRP(DisasContext *s, arg_ri *a)
{
    int64_t offset = (int64_t)a->imm << 12;

    /* The page offset is ok for CF_PCREL. */
    offset -= s->pc_curr & 0xfff;
    gen_pc_plus_diff(s, cpu_reg(s, a->rd), offset);
    return true;
}
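/*
 * Worked example: ADRP computes (PC & ~0xfff) + (signed immediate << 12).
 * Subtracting "s->pc_curr & 0xfff" from the shifted immediate above is
 * what clears the low 12 bits of the PC before the addition.
 */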
/*
 * Add/subtract (immediate)
 */
TRANS(ADD_i, gen_rri, a, 1, 1, tcg_gen_add_i64)
TRANS(SUB_i, gen_rri, a, 1, 1, tcg_gen_sub_i64)
TRANS(ADDS_i, gen_rri, a, 0, 1, a->sf ? gen_add64_CC : gen_add32_CC)
TRANS(SUBS_i, gen_rri, a, 0, 1, a->sf ? gen_sub64_CC : gen_sub32_CC)
/*
 * Add/subtract (immediate, with tags)
 */

static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a,
                                      bool sub_op)
{
    TCGv_i64 tcg_rn, tcg_rd;
    int imm;

    imm = a->uimm6 << LOG2_TAG_GRANULE;
    if (sub_op) {
        imm = -imm;
    }

    tcg_rn = cpu_reg_sp(s, a->rn);
    tcg_rd = cpu_reg_sp(s, a->rd);

    if (s->ata) {
        gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn,
                           tcg_constant_i32(imm),
                           tcg_constant_i32(a->uimm4));
    } else {
        tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
        gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
    }
    return true;
}

TRANS_FEAT(ADDG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, false)
TRANS_FEAT(SUBG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, true)
/* The input should be a value in the bottom e bits (with higher
 * bits zero); returns that value replicated into every element
 * of size e in a 64 bit integer.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    while (e < 64) {
        mask |= mask << e;
        e *= 2;
    }
    return mask;
}
/*
 * Logical (immediate)
 */

/*
 * Simplified variant of pseudocode DecodeBitMasks() for the case where we
 * only require the wmask. Returns false if the imms/immr/immn are a reserved
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each. Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 0x11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones. */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = MAKE_64BIT_MASK(0, s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= MAKE_64BIT_MASK(0, e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
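/*
 * Worked example (illustrative): immn=0, imms=0b111100, immr=0 gives
 * len=1, so e=2, with s=0 and r=0: one set bit per 2-bit element,
 * replicated to 0x5555555555555555. This is why e.g.
 * "AND x0, x1, #0x5555555555555555" is encodable as a logical immediate.
 */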
static bool gen_rri_log(DisasContext *s, arg_rri_log *a, bool set_cc,
                        void (*fn)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t imm;

    /* Some immediate field values are reserved. */
    if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
                                extract32(a->dbm, 0, 6),
                                extract32(a->dbm, 6, 6))) {
        return false;
    }
    if (!a->sf) {
        imm &= 0xffffffffull;
    }

    tcg_rd = set_cc ? cpu_reg(s, a->rd) : cpu_reg_sp(s, a->rd);
    tcg_rn = cpu_reg(s, a->rn);

    fn(tcg_rd, tcg_rn, imm);
    if (set_cc) {
        gen_logic_CC(a->sf, tcg_rd);
    }
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}

TRANS(AND_i, gen_rri_log, a, false, tcg_gen_andi_i64)
TRANS(ORR_i, gen_rri_log, a, false, tcg_gen_ori_i64)
TRANS(EOR_i, gen_rri_log, a, false, tcg_gen_xori_i64)
TRANS(ANDS_i, gen_rri_log, a, true, tcg_gen_andi_i64)
/*
 * Move wide (immediate)
 */

static bool trans_MOVZ(DisasContext *s, arg_movw *a)
{
    int pos = a->hw << 4;
    tcg_gen_movi_i64(cpu_reg(s, a->rd), (uint64_t)a->imm << pos);
    return true;
}

static bool trans_MOVN(DisasContext *s, arg_movw *a)
{
    int pos = a->hw << 4;
    uint64_t imm = a->imm;

    imm = ~(imm << pos);
    if (!a->sf) {
        imm = (uint32_t)imm;
    }
    tcg_gen_movi_i64(cpu_reg(s, a->rd), imm);
    return true;
}

static bool trans_MOVK(DisasContext *s, arg_movw *a)
{
    int pos = a->hw << 4;
    TCGv_i64 tcg_rd, tcg_im;

    tcg_rd = cpu_reg(s, a->rd);
    tcg_im = tcg_constant_i64(a->imm);
    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_im, pos, 16);
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}
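/*
 * For example, assemblers typically synthesize "MOV x0, #0x12345678" as
 * MOVZ x0, #0x5678 followed by MOVK x0, #0x1234, LSL #16: the MOVZ
 * clears the register around one 16-bit chunk, and the MOVK deposits
 * the next chunk without disturbing the rest.
 */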
/*
 * Bitfield
 */

static bool trans_SBFM(DisasContext *s, arg_SBFM *a)
{
    TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
    TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
    unsigned int bitsize = a->sf ? 64 : 32;
    unsigned int ri = a->immr;
    unsigned int si = a->imms;
    unsigned int pos, len;

    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
        if (!a->sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);

        if (len < ri) {
            /*
             * Sign extend the destination field from len to fill the
             * balance of the word. Let the deposit below insert all
             * of those sign bits.
             */
            tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
            len = ri;
        }

        /*
         * We start with zero, and we haven't modified any bits outside
         * bitsize, therefore no final zero-extension is needed for !sf.
         */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
    }
    return true;
}
static bool trans_UBFM(DisasContext *s, arg_UBFM *a)
{
    TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
    TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
    unsigned int bitsize = a->sf ? 64 : 32;
    unsigned int ri = a->immr;
    unsigned int si = a->imms;
    unsigned int pos, len;

    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
    }
    return true;
}
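/*
 * The common aliases map onto the two branches above: "UBFX w0, w1, #8, #4"
 * (immr=8, imms=11) takes the si >= ri path and extracts 4 bits from
 * bit 8, while "LSL w0, w1, #3" (immr=29, imms=28) takes the other path,
 * depositing the low 29 bits of w1 at position 3.
 */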
static bool trans_BFM(DisasContext *s, arg_BFM *a)
{
    TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
    TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
    unsigned int bitsize = a->sf ? 64 : 32;
    unsigned int ri = a->immr;
    unsigned int si = a->imms;
    unsigned int pos, len;

    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        len = (si - ri) + 1;
        pos = 0;
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}
static bool trans_EXTR(DisasContext *s, arg_extract *a)
{
    TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

    tcg_rd = cpu_reg(s, a->rd);

    if (unlikely(a->imm == 0)) {
        /*
         * tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
         * so an extract from bit 0 is a special case.
         */
        if (a->sf) {
            tcg_gen_mov_i64(tcg_rd, cpu_reg(s, a->rm));
        } else {
            tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, a->rm));
        }
    } else {
        tcg_rm = cpu_reg(s, a->rm);
        tcg_rn = cpu_reg(s, a->rn);

        if (a->sf) {
            /* Specialization to ROR happens in EXTRACT2. */
            tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, a->imm);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();

            tcg_gen_extrl_i64_i32(t0, tcg_rm);
            if (a->rm == a->rn) {
                tcg_gen_rotri_i32(t0, t0, a->imm);
            } else {
                TCGv_i32 t1 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(t1, tcg_rn);
                tcg_gen_extract2_i32(t0, t0, t1, a->imm);
            }
            tcg_gen_extu_i32_i64(tcg_rd, t0);
        }
    }
    return true;
}
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t0, src);
            tcg_gen_extrl_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}
/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
    }
}
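/*
 * For example, the shifted-register form "AND x0, x1, x2, LSR #4" below
 * calls shift_reg_imm(tcg_rm, tcg_rm, 1, A64_SHIFT_TYPE_LSR, 4) to
 * pre-shift the Rm operand before the logical operation itself.
 */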

/* Logical (shifted register)
 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE);
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}
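
/*
 * For reference, the switch index opc | (invert << 2) above decodes as:
 *   0 AND, 1 ORR, 2 EOR, 3 ANDS, 4 BIC, 5 ORN, 6 EON, 7 BICS
 * which is why AND/ANDS and BIC/BICS share an arm of the switch and the
 * flag update is keyed off opc == 3 (ANDS and BICS) alone.
 */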

/*
 * Add/subtract (extended register)
 *
 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * opt: 00
 * option: extension type (see DecodeRegExtend)
 * imm3: optional shift to Rm
 *
 * Rd = Rn + LSL(extend(Rm), amount)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4 || opt != 0) {
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }
}
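
/*
 * Example of the extended-register form handled above:
 *   ADD X0, SP, W1, UXTW #2
 * reads SP as the base (Rn may always be SP in this encoding class),
 * zero-extends W1 to 64 bits via ext_and_shift_reg, shifts it left by
 * imm3 == 2 and adds.  Rd may only be SP in the non-flag-setting form.
 */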

/*
 * Add/subtract (shifted register)
 *
 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
 *  imm6: Shift amount to apply to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }
}

/* Data-processing (3 source)
 *
 *    31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
 *  +--+------+-----------+------+------+----+------+------+------+
 *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 *  +--+------+-----------+------+------+----+------+------+------+
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }
}
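
/*
 * Worked example of the op_id packing above: for SMADDL sf:op54:op31:o0
 * is 1:00:001:0, i.e. 0x42.  Bit 0 (o0) selects the subtracting form,
 * bit 2 is set only for the SMULH/UMULH high-half multiplies, and values
 * below 0x42 are the plain MADD/MSUB that need no operand extension.
 */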

/* Add/subtract (with carry)
 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9    5 4   0
 * +--+--+--+------------------------+------+-------------+------+-----+
 * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | 0 0 0 0 0 0 |  Rn  |  Rd |
 * +--+--+--+------------------------+------+-------------+------+-----+
 */

static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        tcg_y = tcg_temp_new_i64();
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}
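
/*
 * Note the identity used above for the subtracting forms: SBC computes
 * Rn - Rm - 1 + C, which equals Rn + NOT(Rm) + C, so inverting Rm first
 * lets ADC and SBC share the gen_adc/gen_adc_CC code paths.
 */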

/*
 * Rotate right into flags
 *  31 30 29                21       15          10      5  4      0
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1 |  Rn  |o2| mask |
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 */
static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
{
    int mask = extract32(insn, 0, 4);
    int o2 = extract32(insn, 4, 1);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 15, 6);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i64 tcg_rn;
    TCGv_i32 nzcv;

    if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, 1);
    tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);

    nzcv = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(nzcv, tcg_rn);

    if (mask & 8) { /* N */
        tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
    }
    if (mask & 4) { /* Z */
        tcg_gen_not_i32(cpu_ZF, nzcv);
        tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
    }
    if (mask & 2) { /* C */
        tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
    }
    if (mask & 1) { /* V */
        tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
    }
}
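
/*
 * The shift counts above follow from QEMU's flag representation: N is
 * the sign bit of cpu_NF, Z is set iff cpu_ZF == 0, C is bit 0 of cpu_CF
 * and V is the sign bit of cpu_VF.  So rotated bit 3 is moved up to bit
 * 31 for N, bit 2 is inverted and isolated for Z, bit 1 is extracted for
 * C, and bit 0 is moved up to bit 31 for V.
 */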

/*
 * Evaluate into flags
 *  31 30 29                21        15   14        10      5  4      0
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 |  Rn  |o3| mask |
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 */
static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
{
    int o3_mask = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int o2 = extract32(insn, 15, 6);
    int sz = extract32(insn, 14, 1);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i32 tmp;
    int shift;

    if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
        !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }
    shift = sz ? 16 : 24;  /* SETF16 or SETF8 */

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
    tcg_gen_shli_i32(cpu_NF, tmp, shift);
    tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
}
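
/*
 * Example for SETF8 (shift == 24): with W<n> = 0x00000080, cpu_NF becomes
 * 0x80000000 (N set), cpu_ZF gets the same non-zero value (Z clear), and
 * cpu_VF = (value << 23) ^ (value << 24) has its sign bit set iff bits 7
 * and 8 of the input differ, i.e. iff the value does not fit in a signed
 * 8-bit quantity.
 */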

/* Conditional compare (immediate / register)
 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND.  */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);

    /* Load the arguments for the new comparison.  */
    if (is_imm) {
        tcg_y = tcg_temp_new_i64();
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison.  */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }

    /* If COND was false, force the flags to #nzcv.  Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, we can make do with just T1.
     * In either case, allow the tcg optimizer to delete any unused mask.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
}
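
/*
 * Concretely, with t0 = !COND the masks above are t1 = -t0 (all ones when
 * the condition failed) and t2 = t0 - 1 (all ones when it held).  N and V
 * are sign bits, so OR-ing in t1 sets them and ANDC-ing with t1 clears
 * them when COND is false.  C is bit 0 of cpu_CF, so OR-ing in t0 sets
 * it; Z is "set iff cpu_ZF == 0", so ANDC-ing cpu_ZF with t1 sets Z and
 * OR-ing in t0 clears it.  When COND holds, t0 and t1 are 0 and t2 is
 * all ones, so every operation above is an identity.
 */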

/* Conditional select
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 * +----+----+---+-----------------+------+------+-----+------+------+
 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+---+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_constant_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET & CSETM.  */
        tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
        if (else_inv) {
            tcg_gen_neg_i64(tcg_rd, tcg_rd);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        if (else_inv && else_inc) {
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) {
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) {
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
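
/*
 * The else_inv/else_inc pair above covers the whole CSEL family:
 *   00 CSEL, 01 CSINC, 10 CSINV, 11 CSNEG
 * and the rn == rm == 31 shortcut implements the CSET (via CSINC) and
 * CSETM (via CSINV) aliases directly as a setcond, negated for CSETM.
 */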

static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

/* REV with sf==1, opcode==3 ("REV64") */
static void handle_rev64(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    if (!sf) {
        unallocated_encoding(s);
        return;
    }
    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
}

/* REV with sf==0, opcode==2
 * REV32 (sf==1, opcode==2)
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
        tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
    } else {
        tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
    }
}

/* REV16 (opcode==1) */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
}
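
/*
 * REV16 swaps the bytes within each 16-bit lane.  The five TCG ops above
 * handle all lanes at once: with mask = 0x00ff00ff..., the result is
 *   ((x & mask) << 8) | ((x >> 8) & mask)
 * i.e. every even byte moves up one position and every odd byte moves
 * down one.
 */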

/* Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, opcode2, rn, rd;
    TCGv_i64 tcg_rd;

    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    opcode2 = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))

    switch (MAP(sf, opcode2, opcode)) {
    case MAP(0, 0x00, 0x00): /* RBIT */
    case MAP(1, 0x00, 0x00):
        handle_rbit(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x01): /* REV16 */
    case MAP(1, 0x00, 0x01):
        handle_rev16(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x02): /* REV/REV32 */
    case MAP(1, 0x00, 0x02):
        handle_rev32(s, sf, rn, rd);
        break;
    case MAP(1, 0x00, 0x03): /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x04): /* CLZ */
    case MAP(1, 0x00, 0x04):
        handle_clz(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x05): /* CLS */
    case MAP(1, 0x00, 0x05):
        handle_cls(s, sf, rn, rd);
        break;
    case MAP(1, 0x01, 0x00): /* PACIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x01): /* PACIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x02): /* PACDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x03): /* PACDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x04): /* AUTIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x05): /* AUTIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x06): /* AUTDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x07): /* AUTDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x08): /* PACIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x09): /* PACIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0a): /* PACDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0b): /* PACDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0c): /* AUTIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0d): /* AUTIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0e): /* AUTDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0f): /* AUTDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x10): /* XPACI */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
        }
        break;
    case MAP(1, 0x01, 0x11): /* XPACD */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
        }
        break;
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }

#undef MAP
}

static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        tcg_n = tcg_temp_new_i64();
        tcg_m = tcg_temp_new_i64();
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
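
/*
 * Only one pair of division helpers exists, operating on 64-bit values,
 * so the 32-bit signed case must sign-extend its operands first.  E.g.
 * SDIV W0, W1, W2 with W1 = 0x80000000 and W2 = -1 divides the 64-bit
 * values -2^31 / -1 = 2^31, whose low 32 bits (0x80000000) are the
 * architecturally required truncated result.  The unsigned case can rely
 * on the zero extension done by read_cpu_reg instead.
 */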

/* LSLV, LSRV, ASRV, RORV */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
}

/* CRC32[BHWX], CRC32C[BHWX] */
static void handle_crc32(DisasContext *s,
                         unsigned int sf, unsigned int sz, bool crc32c,
                         unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_acc, tcg_val;
    TCGv_i32 tcg_bytes;

    if (!dc_isar_feature(aa64_crc32, s)
        || (sf == 1 && sz != 3)
        || (sf == 0 && sz == 3)) {
        unallocated_encoding(s);
        return;
    }

    if (sz == 3) {
        tcg_val = cpu_reg(s, rm);
    } else {
        uint64_t mask;
        switch (sz) {
        case 0:
            mask = 0xFF;
            break;
        case 1:
            mask = 0xFFFF;
            break;
        case 2:
            mask = 0xFFFFFFFF;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_val = tcg_temp_new_i64();
        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
    }

    tcg_acc = cpu_reg(s, rn);
    tcg_bytes = tcg_constant_i32(1 << sz);

    if (crc32c) {
        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    } else {
        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    }
}
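
/*
 * The size field maps directly onto the number of message bytes fed to
 * the helper: sz = 0/1/2/3 selects CRC32B/H/W/X, i.e. 1 << sz = 1, 2, 4
 * or 8 bytes, with the narrow cases masking the value register down
 * first.
 */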

/* Data-processing (2 source)
 *   31   30  29 28             21 20  16 15    10 9    5 4    0
 * +----+---+---+-----------------+------+--------+------+------+
 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd, setflag;
    sf = extract32(insn, 31, 1);
    setflag = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (setflag && opcode != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SUBP(S) */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 tcg_n, tcg_m, tcg_d;

            tcg_n = read_cpu_reg_sp(s, rn, true);
            tcg_m = read_cpu_reg_sp(s, rm, true);
            tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
            tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
            tcg_d = cpu_reg(s, rd);

            if (setflag) {
                gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
            } else {
                tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
            }
        }
        break;
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 4: /* IRG */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        }
        if (s->ata) {
            gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
                           cpu_reg_sp(s, rn), cpu_reg(s, rm));
        } else {
            gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
                                             cpu_reg_sp(s, rn));
        }
        break;
    case 5: /* GMI */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 t = tcg_temp_new_i64();

            tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4);
            tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
            tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t);
        }
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 12: /* PACGA */
        if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        gen_helper_pacga(cpu_reg(s, rd), cpu_env,
                         cpu_reg(s, rn), cpu_reg_sp(s, rm));
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 */
    {
        int sz = extract32(opcode, 0, 2);
        bool crc32c = extract32(opcode, 2, 1);
        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
        break;
    }
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}

/*
 * Data processing - register
 *  31  30 29  28      25    21  20  16      10         0
 * +--+---+--+---+-------+-----+-------+-------+---------+
 * |  |op0|  |op1| 1 0 1 | op2 |       |  op3  |         |
 * +--+---+--+---+-------+-----+-------+-------+---------+
 */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 30, 1);
    int op1 = extract32(insn, 28, 1);
    int op2 = extract32(insn, 21, 4);
    int op3 = extract32(insn, 10, 6);

    if (!op1) {
        if (op2 & 8) {
            if (op2 & 1) {
                /* Add/sub (extended register) */
                disas_add_sub_ext_reg(s, insn);
            } else {
                /* Add/sub (shifted register) */
                disas_add_sub_reg(s, insn);
            }
        } else {
            /* Logical (shifted register) */
            disas_logic_reg(s, insn);
        }
        return;
    }

    switch (op2) {
    case 0x0:
        switch (op3) {
        case 0x00: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;

        case 0x01: /* Rotate right into flags */
        case 0x21:
            disas_rotate_right_into_flags(s, insn);
            break;

        case 0x02: /* Evaluate into flags */
        case 0x12:
        case 0x22:
        case 0x32:
            disas_evaluate_into_flags(s, insn);
            break;

        default:
            goto do_unallocated;
        }
        break;

    case 0x2: /* Conditional compare */
        disas_cc(s, insn); /* both imm and reg forms */
        break;

    case 0x4: /* Conditional select */
        disas_cond_select(s, insn);
        break;

    case 0x6: /* Data-processing */
        if (op0) {    /* (1 source) */
            disas_data_proc_1src(s, insn);
        } else {      /* (2 source) */
            disas_data_proc_2src(s, insn);
        }
        break;
    case 0x8 ... 0xf: /* (3 source) */
        disas_data_proc_3src(s, insn);
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}

static void handle_fp_compare(DisasContext *s, int size,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (size == MO_64) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_constant_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
    } else {
        TCGv_i32 tcg_vn = tcg_temp_new_i32();
        TCGv_i32 tcg_vm = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_vn, rn, 0, size);
        if (cmp_with_zero) {
            tcg_gen_movi_i32(tcg_vm, 0);
        } else {
            read_vec_element_i32(s, tcg_vm, rm, 0, size);
        }

        switch (size) {
        case MO_32:
            if (signal_all_nans) {
                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        case MO_16:
            if (signal_all_nans) {
                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    gen_set_nzcv(tcg_flags);
}

/* Floating point compare
 *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    if (mos || op || op2r) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
}

/* Floating point conditional compare
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGLabel *label_continue = NULL;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cond < 0x0e) { /* not always */
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        gen_set_nzcv(tcg_constant_i64(nzcv << 28));
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, size, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}

/* Floating point conditional select
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 1 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false;
    DisasCompare64 c;
    MemOp sz;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Zero extend sreg & hreg inputs to 64 bits now.  */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, sz);
    read_vec_element(s, t_false, rm, 0, sz);

    a64_test_cc(&c, cond);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
                        t_true, t_false);

    /* Note that sregs & hregs write back zeros to the high bits,
       and we've already done the zero-extension. */
    write_fp_dreg(s, rd, t_true);
}

/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
        break;
    case 0x2: /* FNEG */
        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
        break;
    case 0x3: /* FSQRT */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode;

        fpst = fpstatus_ptr(FPST_FPCR_F16);
        tcg_rmode = gen_set_rmode(opcode & 7, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
        break;
    }
    case 0xe: /* FRINTX */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
    TCGv_i32 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;

    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        goto done;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x6: /* BFCVT */
        gen_fpst = gen_helper_bfcvt;
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = opcode & 7;
        gen_fpst = gen_helper_rints;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rints_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rints;
        break;
    case 0x10: /* FRINT32Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x12: /* FRINT64Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint64_s;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_s;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }

 done:
    write_fp_sreg(s, rd, tcg_res);
}

/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
    TCGv_i64 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;

    switch (opcode) {
    case 0x0: /* FMOV */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = opcode & 7;
        gen_fpst = gen_helper_rintd;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rintd_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rintd;
        break;
    case 0x10: /* FRINT32Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x12: /* FRINT64Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint64_d;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_d;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }

 done:
    write_fp_dreg(s, rd, tcg_res);
}

static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
        } else {
            /* Single to half */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            TCGv_i32 ahp = get_ahp_flag();
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
            write_fp_sreg(s, rd, tcg_rd);
        }
        break;
    }
    case 0x1:
    {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
        } else {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            TCGv_i32 ahp = get_ahp_flag();
            /* Double to half */
            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
        }
        write_fp_sreg(s, rd, tcg_rd);
        break;
    }
    case 0x3:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 tcg_ahp = get_ahp_flag();
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_sreg(s, rd, tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_dreg(s, rd, tcg_rd);
        }
        break;
    }
    default:
        g_assert_not_reached();
    }
}

/* Floating point data-processing (1 source)
 *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (mos) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }

    case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
        if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
            goto do_unallocated;
        }
        /* fall through */
    case 0x0 ... 0x3:
    case 0x8 ... 0xc:
    case 0xe ... 0xf:
        /* 32-to-32 and 64-to-64 ops */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }

            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    case 0x6:
        switch (type) {
        case 1: /* BFCVT */
            if (!dc_isar_feature(aa64_bf16, s)) {
                goto do_unallocated;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}

/* Floating-point data-processing (2 source) - single precision */
static void handle_fp_2src_single(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negs(tcg_res, tcg_res);
        break;
    }

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating-point data-processing (2 source) - double precision */
static void handle_fp_2src_double(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i64();
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negd(tcg_res, tcg_res);
        break;
    }

    write_fp_dreg(s, rd, tcg_res);
}

/* Floating-point data-processing (2 source) - half precision */
static void handle_fp_2src_half(DisasContext *s, int opcode,
                                int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating point data-processing (2 source)
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_fp_2src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int opcode = extract32(insn, 12, 4);

    if (opcode > 8 || mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_single(s, opcode, rd, rn, rm);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_double(s, opcode, rd, rn, rm);
        break;
    case 3:
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_half(s, opcode, rd, rn, rm);
        break;
    default:
        unallocated_encoding(s);
    }
}

/* Floating-point data-processing (3 source) - single precision */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negs(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating-point data-processing (3 source) - double precision */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);
}

/* Floating-point data-processing (3 source) - half precision */
static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
                                int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_op3 = read_fp_hreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
    }

    if (o0 != o1) {
        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
    }

    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);
}

/* Floating point data-processing (3 source)
 *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 */
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    bool o0 = extract32(insn, 15, 1);
    bool o1 = extract32(insn, 21, 1);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
        break;
    case 3:
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
        break;
    default:
        unallocated_encoding(s);
    }
}
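
/*
 * The o1/o0 pair decoded above selects among the four fused forms:
 *   00 FMADD   rd =  ra + rn * rm
 *   01 FMSUB   rd =  ra - rn * rm
 *   10 FNMADD  rd = -ra - rn * rm
 *   11 FNMSUB  rd = -ra + rn * rm
 * matching the operand negations done in the handlers: o1 negates ra and
 * o0 != o1 negates rn before the single fused multiply-add.
 */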

/* Floating point immediate
 *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm5 = extract32(insn, 5, 5);
    int imm8 = extract32(insn, 13, 8);
    int type = extract32(insn, 22, 2);
    int mos = extract32(insn, 29, 3);
    uint64_t imm;
    MemOp sz;

    if (mos || imm5) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    imm = vfp_expand_imm(sz, imm8);
    write_fp_dreg(s, rd, tcg_constant_i64(imm));
}
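
/*
 * vfp_expand_imm() reconstructs the standard AArch64 FP8 immediate: for
 * imm8 = abcdefgh the sign is a, the exponent is NOT(b) followed by
 * replicated b and cd, and the fraction is efgh, giving +/- n/16 * 2^r
 * with n in 16..31 and r in -3..4.  For example imm8 == 0x70 expands
 * to 1.0.
 */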

/* Handle floating point <=> fixed point conversions. Note that we can
 * also deal with fp <=> integer conversions as a special case (scale == 64)
 * OPTME: consider handling that special case specially or at least skipping
 * the call to scalbn in the helpers for zero shifts.
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);

    tcg_shift = tcg_constant_i32(64 - scale);

    if (itof) {
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            TCGv_i64 tcg_extend = tcg_temp_new_i64();

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* float64 */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so FCVTA[US] is a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* float64 */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            break;

        case 0: /* float32 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
            }
            break;

        case 3: /* float16 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
            }
            break;

        default:
            g_assert_not_reached();
        }

        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
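
/*
 * The scale argument arrives as 64 - fbits, so tcg_shift above recovers
 * the number of fraction bits; scale == 64 therefore means zero fraction
 * bits and the fixed-point helpers degenerate to plain fp <=> integer
 * conversions, which is how disas_fp_int_conv reuses this function.
 */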

/* Floating point <-> fixed point conversions
 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    if (sbit || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* float32 */
    case 1: /* float64 */
        break;
    case 3: /* float16 */
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}

static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion.
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half. */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
{
    TCGv_i64 t = read_fp_dreg(s, rn);
    TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);

    gen_helper_fjcvtzs(t, t, fpstatus);

    tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
    tcg_gen_extrh_i64_i32(cpu_ZF, t);
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
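
/*
 * Illustrative note (added here, not in the upstream file): the helper
 * packs an "inexact" indication into the high 32 bits of its result, so
 * copying that half into cpu_ZF (where Z reads as set when cpu_ZF == 0)
 * makes Z = 1 exactly when the conversion was exact, while N, C and V
 * are cleared, matching the JavaScript convert-to-int32 semantics.
 */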
/* Floating point <-> integer conversions
 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof = false;

    if (sbit) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 2: /* SCVTF */
    case 3: /* UCVTF */
        itof = true;
        /* fallthru */
    case 4: /* FCVTAS */
    case 5: /* FCVTAU */
        if (rmode != 0) {
            goto do_unallocated;
        }
        /* fallthru */
    case 0: /* FCVT[NPMZ]S */
    case 1: /* FCVT[NPMZ]U */
        switch (type) {
        case 0: /* float32 */
        case 1: /* float64 */
            break;
        case 3: /* float16 */
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            break;
        default:
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
        break;

    default:
        switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
        case 0b01100110: /* FMOV half <-> 32-bit int */
        case 0b01100111:
        case 0b11100110: /* FMOV half <-> 64-bit int */
        case 0b11100111:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            /* fallthru */
        case 0b00000110: /* FMOV 32-bit */
        case 0b00000111:
        case 0b10100110: /* FMOV 64-bit */
        case 0b10100111:
        case 0b11001110: /* FMOV top half of 128-bit */
        case 0b11001111:
            if (!fp_access_check(s)) {
                return;
            }
            itof = opcode & 1;
            handle_fmov(s, rd, rn, type, itof);
            break;

        case 0b00111110: /* FJCVTZS */
            if (!dc_isar_feature(aa64_jscvt, s)) {
                goto do_unallocated;
            } else if (fp_access_check(s)) {
                handle_fjcvtzs(s, rd, rn);
            }
            break;

        default:
        do_unallocated:
            unallocated_encoding(s);
            return;
        }
        break;
    }
}
/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
 *   31  30  29 28     25 24                          0
 * +---+---+---+---------+-----------------------------+
 * |   | 0 |   | 1 1 1 1 |                             |
 * +---+---+---+---------+-----------------------------+
 */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        /* Floating point data-processing (3 source) */
        disas_fp_3src(s, insn);
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            disas_fp_2src(s, insn);
            break;
        case 3:
            /* Floating point conditional select */
            disas_fp_csel(s, insn);
            break;
        case 0:
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}
static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
                     int pos)
{
    /* Extract 64 bits from the middle of two concatenated 64 bit
     * vector register slices left:right. The extracted bits start
     * at 'pos' bits into the right (least significant) side.
     * We return the result in tcg_right, and guarantee not to
     * trash tcg_left.
     */
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    assert(pos > 0 && pos < 64);

    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
}
/* EXT
 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3;
    TCGv_i64 tcg_resl, tcg_resh;

    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Vd gets bits starting at pos bits into Vm:Vn. This is
     * either extracting 128 bits from a 128:128 concatenation, or
     * extracting 64 bits from a 64:64 concatenation.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        if (pos >= 64) {
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
    }
    clear_vec_high(s, is_q, rd);
}
/* TBL/TBX
 *   31  30 29         24 23 22  21 20  16 15 14 13  12  11 10 9    5 4    0
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tbx = extract32(insn, 12, 1);
    int len = (extract32(insn, 13, 2) + 1) * 16;

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rm), cpu_env,
                       is_q ? 16 : 8, vec_full_reg_size(s),
                       (len << 6) | (is_tbx << 5) | rn,
                       gen_helper_simd_tblx);
}
/* ZIP/UZP/TRN
 *   31  30 29         24 23  22  21 20  16 15 14 12 11 10 9    5 4    0
 * +---+---+-------------+------+---+------+---+------------------+------+
 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
 * +---+---+-------------+------+---+------+---+------------------+------+
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
     * bit 2 indicates 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res[2], tcg_ele;

    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL;
    tcg_ele = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        int o, w;

        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_ele, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_ele, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_ele, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_ele, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        w = (i * esize) / 64;
        o = (i * esize) % 64;
        if (o == 0) {
            tcg_gen_mov_i64(tcg_res[w], tcg_ele);
        } else {
            tcg_gen_shli_i64(tcg_ele, tcg_ele, o);
            tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele);
        }
    }

    for (i = 0; i <= is_q; ++i) {
        write_vec_element(s, tcg_res[i], rd, i, MO_64);
    }
    clear_vec_high(s, is_q, rd);
}
/*
 * do_reduction_op helper
 *
 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
 * important for correct NaN propagation that we do these
 * operations in exactly the order specified by the pseudocode.
 *
 * This is a recursive function, TCG temps should be freed by the
 * calling function once it is done with the values.
 */
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        int element;
        MemOp msize = esize == 16 ? MO_16 : MO_32;
        TCGv_i32 tcg_elem;

        /* We should have one register left here */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        return tcg_res;
    }
}
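
/*
 * Worked example (illustration added here, not in the upstream file):
 * for an 8-lane reduction vmap starts as 0b11111111; ctpop8(vmap)/2 == 4,
 * so vmap_lo == 0b00001111 and vmap_hi == 0b11110000, and the recursion
 * keeps halving each mask until a single set bit (one lane) remains.
 */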
/* AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of size field encodes min vs max and the actual size
         * depends on the encoding of the U bit. If not set (and FP16
         * enabled) then we do half-precision float instead of single
         * precision.
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && dc_isar_feature(aa64_fp16, s)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector
     * to produce a single result. We can guarantee that a 64
     * bit intermediate is sufficient:
     *  + for [US]ADDLV the maximum element size is 32 bits, and
     *    the result type is 64 bits
     *  + for FMAX*V, FMIN*V, ADDV the intermediate type is the
     *    same as the element size, which is 32 bits at most
     * For the integer operations we can choose to work at 64
     * or 32 bits and truncate at the end; for simplicity
     * we use 64 bits always. The floating point
     * ops do require 32 bit intermediates, though.
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
    } else {
        /* Floating point vector reduction ops which work across 32
         * bit (single) or 16 bit (half-precision) intermediates.
         * Note that correct NaN propagation requires that we do these
         * operations in exactly the order specified by the pseudocode.
         */
        TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
    }

    /* Now truncate the result to the width required for the final output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2*esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
}
/* DUP (Element, Vector)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    int index;

    if (size > 3 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);
    tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
                         vec_reg_offset(s, rn, index, size),
                         is_q ? 16 : 8, vec_full_reg_size(s));
}
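
/*
 * Worked example (illustration added here, not in the upstream file):
 * imm5 == 0b00110 gives size == ctz32(imm5) == 1 (16-bit lanes) and
 * index == imm5 >> 2 == 1, i.e. lane 1 of Vn is broadcast.
 */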
/* DUP (element, scalar)
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 */
static void handle_simd_dupes(DisasContext *s, int rd, int rn,
                              int imm5)
{
    int size = ctz32(imm5);
    int index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);

    /* This instruction just extracts the specified element and
     * zero-extends it into the bottom of the destination register.
     */
    tmp = tcg_temp_new_i64();
    read_vec_element(s, tmp, rn, index, size);
    write_fp_dreg(s, rd, tmp);
}
/* DUP (General)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    uint32_t dofs, oprsz, maxsz;

    if (size > 3 || ((size == 3) && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dofs = vec_full_reg_offset(s, rd);
    oprsz = is_q ? 16 : 8;
    maxsz = vec_full_reg_size(s);

    tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
}
/* INS (Element)
 *
 *  31                   21 20    16 15  14    11  10 9    5 4    0
 * +-----------------------+--------+------------+---+------+------+
 * | 0 1 1 0 1 1 1 0 0 0 0 |  imm5  | 0 |  imm4  | 1 |  Rn  |  Rd  |
 * +-----------------------+--------+------------+---+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_inse(DisasContext *s, int rd, int rn,
                             int imm4, int imm5)
{
    int size = ctz32(imm5);
    int src_index, dst_index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dst_index = extract32(imm5, 1 + size, 5);
    src_index = extract32(imm4, size, 4);

    tmp = tcg_temp_new_i64();

    read_vec_element(s, tmp, rn, src_index, size);
    write_vec_element(s, tmp, rd, dst_index, size);

    /* INS is considered a 128-bit write for SVE. */
    clear_vec_high(s, true, rd);
}
/* INS (General)
 *
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 1 1 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
{
    int size = ctz32(imm5);
    int idx;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    idx = extract32(imm5, 1 + size, 4 - size);
    write_vec_element(s, cpu_reg(s, rn), rd, idx, size);

    /* INS is considered a 128-bit write for SVE. */
    clear_vec_high(s, true, rd);
}
/* UMOV/SMOV (is_q indicates 32/64; signed if is_signed)
 *
 *  31  30   29              21 20    16 15    12   10 9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 1 U 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * U: unsigned when set
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
                                  int rn, int rd, int imm5)
{
    int size = ctz32(imm5);
    int element;
    TCGv_i64 tcg_rd;

    /* Check for UnallocatedEncodings */
    if (is_signed) {
        if (size > 2 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        if (size > 3
            || (size < 3 && is_q)
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    element = extract32(imm5, 1 + size, 4);

    tcg_rd = cpu_reg(s, rd);
    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
    if (is_signed && !is_q) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
/* AdvSIMD copy
 *   31  30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +---+---+----+-----------------+------+---+------+---+------+------+
 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +---+---+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int op = extract32(insn, 29, 1);
    int is_q = extract32(insn, 30, 1);
    int imm5 = extract32(insn, 16, 5);

    if (op) {
        if (is_q) {
            /* INS (element) */
            handle_simd_inse(s, rd, rn, imm4, imm5);
        } else {
            unallocated_encoding(s);
        }
    } else {
        switch (imm4) {
        case 0:
            /* DUP (element - vector) */
            handle_simd_dupe(s, is_q, rd, rn, imm5);
            break;
        case 1:
            /* DUP (general) */
            handle_simd_dupg(s, is_q, rd, rn, imm5);
            break;
        case 3:
            if (is_q) {
                /* INS (general) */
                handle_simd_insg(s, rd, rn, imm5);
            } else {
                unallocated_encoding(s);
            }
            break;
        case 5:
        case 7:
            /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
            handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
    }
}
/* AdvSIMD modified immediate
 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 *
 * There are a number of operations that can be carried out here:
 *   MOVI - move (shifted) imm into register
 *   MVNI - move inverted (shifted) imm into register
 *   ORR  - bitwise OR of (shifted) imm with register
 *   BIC  - bitwise clear of (shifted) imm with register
 * With ARMv8.2 we also have:
 *   FMOV half-precision
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        /* Check for FMOV (vector, immediate) - half-precision */
        if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cmode == 15 && o2 && !is_neg) {
        /* FMOV (vector, immediate) - half-precision */
        imm = vfp_expand_imm(MO_16, abcdefgh);
        /* now duplicate across the lanes */
        imm = dup_const(MO_16, imm);
    } else {
        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
    }

    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
        /* MOVI or MVNI, with MVNI negation handled above. */
        tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
                             vec_full_reg_size(s), imm);
    } else {
        /* ORR or BIC, with BIC negation to AND handled above. */
        if (is_neg) {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
        } else {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
        }
    }
}
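
/*
 * Illustrative note (added here, not in the upstream file): the cmode
 * test above selects the ORR/BIC forms: (cmode & 0x9) == 0x1 matches
 * 0b0xx1 (the 32-bit shifted immediates) and (cmode & 0xd) == 0x9
 * matches 0b10x1 (the 16-bit shifted immediates); every other cmode is
 * a plain MOVI/MVNI lane duplicate.
 */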
/* AdvSIMD scalar copy
 *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +-----+----+-----------------+------+---+------+---+------+------+
 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +-----+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int imm5 = extract32(insn, 16, 5);
    int op = extract32(insn, 29, 1);

    if (op != 0 || imm4 != 0) {
        unallocated_encoding(s);
        return;
    }

    /* DUP (element, scalar) */
    handle_simd_dupes(s, rd, rn, imm5);
}
/* AdvSIMD scalar pairwise
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but size[1] is always 1 for valid
     * encodings.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        fpst = NULL;
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op, size[0] is 32 or 64 bit*/
        if (!u) {
            if (!dc_isar_feature(aa64_fp16, s)) {
                unallocated_encoding(s);
                return;
            } else {
                size = MO_16;
            }
        } else {
            size = extract32(size, 0, 1) ? MO_64 : MO_32;
        }

        if (!fp_access_check(s)) {
            return;
        }

        fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == MO_64) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, size);
        read_vec_element_i32(s, tcg_op2, rn, 1, size);

        if (size == MO_16) {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        write_fp_sreg(s, rd, tcg_res);
    }
}
/*
 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 *
 * This code handles the common shifting code and is used by both
 * the vector and scalar code.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_constant_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_res */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }
}
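
/*
 * Worked example (illustration added here, not in the upstream file):
 * URSHR by 2 on the value 7 adds the rounding constant
 * 1 << (2 - 1) == 2 first, so (7 + 2) >> 2 == 2, the round-to-nearest
 * result of 7/4, where a plain USHR would give 1.
 */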
/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* shift count same as element size is valid but does nothing;
         * special case to avoid potential shift by 64.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);
}
/* SHL/SLI - Scalar shift left */
static void handle_scalar_simd_shli(DisasContext *s, bool insert,
                                    int immh, int immb, int opcode,
                                    int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
    } else {
        tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);
}
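
/*
 * Worked example (illustration added here, not in the upstream file):
 * immh == 0b1000 and immb == 0b010 give size == 3 and
 * shift == immhb - 64 == 2, i.e. SHL #2; for SLI the deposit above
 * preserves the low 2 bits of the destination register.
 */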
/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
 * (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    assert(size < 4);

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_temp_new_i64();

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        if (i == 0) {
            tcg_gen_mov_i64(tcg_final, tcg_rd);
        } else {
            tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
        }
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    clear_vec_high(s, is_q, rd);
}
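
/*
 * Worked example (illustration added here, not in the upstream file):
 * immh == 0b0010 and immb == 0b101 give immhb == 21, size == 1
 * (16-bit result lanes) and shift == 2 * 16 - 21 == 11, i.e. a 32->16
 * narrowing right shift by 11.
 */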
/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!is_q && extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* Since we use the variable-shift helpers we must
     * replicate the shift count into each element of
     * the tcg_shift value.
     */
    switch (size) {
    case 0:
        shift |= shift << 8;
        /* fall through */
    case 1:
        shift |= shift << 16;
        break;
    case 2:
    case 3:
        break;
    default:
        g_assert_not_reached();
    }
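
    /*
     * Worked example (illustration added here, not in the upstream file):
     * for size == 0 a shift count of 5 becomes 0x05050505 via the two
     * ORs above, so every byte lane seen by the variable-shift helper
     * carries the same count.
     */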
    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_shift = tcg_constant_i64(shift);
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_constant_i32(shift);
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        MemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            if (scalar) {
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }
        }

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
/* Common vector code for handling integer to FP conversion */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    TCGv_i32 tcg_shift = NULL;

    MemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    if (fracbits || size == MO_64) {
        tcg_shift = tcg_constant_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }
    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }
    }

    clear_vec_high(s, elements << size == 16, rd);
}
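
/*
 * Worked example (illustration added here, not in the upstream file):
 * a fixed-point SCVTF with fracbits == 8 converts the integer 384
 * (0x180) to 384 / 2^8 == 1.5, since the helpers divide by
 * 2^fracbits after the integer conversion.
 */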
/* UCVTF/SCVTF - Integer to FP conversion */
static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int opcode,
                                         int rn, int rd)
{
    int size, elements, fracbits;
    int immhb = immh << 3 | immb;

    if (immh & 8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 4) {
        size = MO_32;
    } else if (immh & 2) {
        size = MO_16;
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* immh == 0 would be a failure of the decode logic */
        g_assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (is_scalar) {
        elements = 1;
    } else {
        elements = (8 << is_q) >> size;
    }
    fracbits = (16 << size) - immhb;

    if (!fp_access_check(s)) {
        return;
    }

    handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
}
/* FCVTZS, FCVTZU - FP to fixed-point conversion */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int pass, size, fracbits;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    if (immh & 0x8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 0x4) {
        size = MO_32;
    } else if (immh & 0x2) {
        size = MO_16;
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* Should have split out AdvSIMD modified immediate earlier. */
        assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus);
    fracbits = (16 << size) - immhb;
    tcg_shift = tcg_constant_i32(fracbits);

    if (size == MO_64) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
        int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);

        switch (size) {
        case MO_16:
            if (is_u) {
                fn = gen_helper_vfp_touhh;
            } else {
                fn = gen_helper_vfp_toshh;
            }
            break;
        case MO_32:
            if (is_u) {
                fn = gen_helper_vfp_touls;
            } else {
                fn = gen_helper_vfp_tosls;
            }
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, size);
            fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, size);
            }
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
/* AdvSIMD scalar shift by immediate
 *  31 30  29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +-----+---+-------------+------+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-------------+------+------+--------+---+------+------+
 *
 * This is the scalar version, so it works on fixed-size registers.
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF, UCVTF */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN, SQSHRUN2 */
    case 0x11: /* SQRSHRUN, SQRSHRUN2 */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
    case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS, FCVTZU */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* AdvSIMD scalar three different
 *  31 30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL, SQDMLAL2 */
    case 0xb: /* SQDMLSL, SQDMLSL2 */
    case 0xd: /* SQDMULL, SQDMULL2 */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);
    } else {
        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
                                              tcg_res, tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);
    }
}
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Handle 64x64->64 opcodes which are shared between the scalar
     * and vector 3-same groups. We cover every opcode where size == 3
     * is valid in either the three-reg-same (integer, not pairwise)
     * or scalar-three-reg-same groups.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT, CMHI */
        /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
         * We implement this using setcond (test) and then negating.
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST, CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* SSHL, USHL */
        if (u) {
            gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL, UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL, URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL, UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD, SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
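
/*
 * Illustrative note (added here, not in the upstream file): the
 * setcond+neg idiom above relies on tcg_gen_setcond_i64 producing
 * 0 or 1; negating maps 1 to all-ones (0xffffffffffffffff), which is
 * exactly the ARM all-ones/all-zeros comparison mask.
 */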
/* Handle the 3-same-operands float operations; shared by the scalar
 * and vector encodings. The caller must filter out any encodings
 * not allocated for the encoding it is dealing with.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);
        } else {
            /* Single */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* scalar single so clear high part */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
    }

    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}
/* AdvSIMD scalar three same
 *  31 30  29 28       24 23  22  21 20  16 15    11  10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size[1] and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5d: /* FACGE */
        case 0x7d: /* FACGT */
        case 0x1c: /* FCMEQ */
        case 0x5c: /* FCMGE */
        case 0x7c: /* FCMGT */
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB (vector) */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
    } else {
        /* Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
         * no side effects for all these operations.
         * OPTME: special-purpose helpers would avoid doing some
         * unnecessary work in the helper for the 8 and 16 bit cases.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
    }

    write_fp_dreg(s, rd, tcg_rd);
}
/* AdvSIMD scalar three same FP16
 *  31 30  29 28       24 23 22 21 20  16 15 14 13    11 10  9  5 4  0
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
 */
static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
                                                  uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 3);
    int rm = extract32(insn, 16, 5);
    bool u = extract32(insn, 29, 1);
    bool a = extract32(insn, 23, 1);
    int fpopcode = opcode | (a << 3) | (u << 4);
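    /*
     * The case labels below can be read off from this packing: e.g.
     * FABD has opcode 0x2, a == 1 and u == 1, giving fpopcode
     * 0x2 | (1 << 3) | (1 << 4) == 0x1a.
     */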
    TCGv_ptr fpst;
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;

    switch (fpopcode) {
    case 0x03: /* FMULX */
    case 0x04: /* FCMEQ (reg) */
    case 0x07: /* FRECPS */
    case 0x0f: /* FRSQRTS */
    case 0x14: /* FCMGE (reg) */
    case 0x15: /* FACGE */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT (reg) */
    case 0x1d: /* FACGT */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_res = tcg_temp_new_i32();

    switch (fpopcode) {
    case 0x03: /* FMULX */
        gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x04: /* FCMEQ (reg) */
        gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x07: /* FRECPS */
        gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x0f: /* FRSQRTS */
        gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x14: /* FCMGE (reg) */
        gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x15: /* FACGE */
        gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1a: /* FABD */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
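        /*
         * The half-precision sign is bit 15, so masking the subtract
         * result with 0x7fff yields |op1 - op2| without needing a
         * separate absolute-value helper.
         */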
        break;
    case 0x1c: /* FCMGT (reg) */
        gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1d: /* FACGT */
        gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);
}
/* AdvSIMD scalar three same extra
 *  31 30  29 28       24 23  22  21 20  16 15 14    11  10 9  5 4  0
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
                                                   uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i32 ele1, ele2, ele3;
    TCGv_i64 res;
    bool feature;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /* Do a single operation on the lowest element in the vector.
     * We use the standard Neon helpers and rely on 0 OP 0 == 0
     * with no side effects for all these operations.
     * OPTME: special-purpose helpers would avoid doing some
     * unnecessary work in the helper for the 16 bit cases.
     */
    ele1 = tcg_temp_new_i32();
    ele2 = tcg_temp_new_i32();
    ele3 = tcg_temp_new_i32();

    read_vec_element_i32(s, ele1, rn, 0, size);
    read_vec_element_i32(s, ele2, rm, 0, size);
    read_vec_element_i32(s, ele3, rd, 0, size);

    switch (opcode) {
    case 0x0: /* SQRDMLAH */
        if (size == 1) {
            gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    case 0x1: /* SQRDMLSH */
        if (size == 1) {
            gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    default:
        g_assert_not_reached();
    }

    res = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(res, ele3);
    write_fp_dreg(s, rd, res);
}
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Handle 64->64 opcodes which are shared between the scalar and
     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
     * is valid in either group and also the double-precision fp ops.
     * The caller only need provide tcg_rmode and tcg_fpstatus if the op
     * requires them.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode is shared with CNT and RBIT but we have earlier
         * enforced that size == 3 if and only if this is the NOT insn.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT */
        /* 64 bit integer comparison against zero, result is
         * test ? (2^64 - 1) : 0. We implement via setcond(test)
         * and then negating the result.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
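        /*
         * setcond() produces 0 or 1; negating that gives the all-zeroes
         * or all-ones (2^64 - 1) lane value that the AdvSIMD compares
         * are defined to return.
         */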
        break;
    case 0x8: /* CMGT, CMGE */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            tcg_gen_abs_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1e: /* FRINT32Z */
    case 0x5e: /* FRINT32X */
        gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1f: /* FRINT64Z */
    case 0x5f: /* FRINT64X */
        gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_constant_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOpFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fallthrough */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }
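        /*
         * There are no dedicated LT/LE helpers: FCMLT (op < 0) is run
         * as FCMGT (0 > op), and FCMLE as FCMGE (0 >= op), by swapping
         * the operand order in the loop below when "swap" is set.
         */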
        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_constant_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOpFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }

        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                gen_helper_recpe_u32(tcg_res, tcg_op);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
     * in the source becomes a size element in the destination).
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;
    int passes = scalar ? 1 : 2;

    if (scalar) {
        tcg_res[1] = tcg_constant_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, SQXTUN */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, UQXTN */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
            } else {
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
                TCGv_i32 ahp = get_ahp_flag();

                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
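                /*
                 * deposit(lo, hi, 16, 16) packs the two converted
                 * half-precision results into one 32-bit lane: bits
                 * [15:0] come from tcg_lo and bits [31:16] from tcg_hi.
                 */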
            }
            break;
        case 0x36: /* BFCVTN, BFCVTN2 */
        {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
        }
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            /* 64 bit to 32 bit float conversion
             * with von Neumann rounding (round to odd)
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
    }
    clear_vec_high(s, is_q, rd);
}
/* Remaining saturating accumulating ops */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        clear_vec_high(s, is_q, rd);
    }
}
/* AdvSIMD scalar two reg misc
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD*/
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
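            /*
             * These two opcode bits map directly onto the FPCR.RMode
             * style FPROUNDING_* values: FCVTN* -> 0 (tie-even),
             * FCVTP* -> 1 (+inf), FCVTM* -> 2 (-inf), FCVTZ* -> 3
             * (zero).  E.g. FCVTMS is opcode 0x1b: bit 5 is 0 and
             * bit 0 is 1, giving rmode 2.
             */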
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
    } else {
        tcg_fpstatus = NULL;
        tcg_rmode = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, cpu_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0),
                                 tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0),
                                 tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
    }

    if (is_fcvt) {
        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    GVecGen2iFn *gvec_fn;
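    /*
     * immh:immb encodes (2 * esize) - shift, so the decoded shift count
     * lies in [1, esize].  E.g. immh = 0b0001, immb = 0b010 gives byte
     * elements (size 0), immhb = 10 and shift = 16 - 10 = 6.
     */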
    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }
    tcg_debug_assert(size <= 3);

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
        break;

    case 0x08: /* SRI */
        gvec_fn = gen_gvec_sri;
        break;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same size as element size produces zero. */
                tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
                                     is_q ? 16 : 8, vec_full_reg_size(s), 0);
                return;
            }
            gvec_fn = tcg_gen_gvec_shri;
        } else {
            /* Shift count the same size as element size produces all sign. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gvec_fn = tcg_gen_gvec_sari;
        }
        break;

    case 0x04: /* SRSHR / URSHR (rounding) */
        gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
        break;

    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
        break;

    default:
        g_assert_not_reached();
    }

    gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
}
/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);

    /* Range of size is limited by decode: immh is a non-zero 4 bit field */
    assert(size >= 0 && size <= 3);

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (insert) {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
    } else {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
    }
}
/* USHLL/SHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = tcg_temp_new_i64();
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right now and use shifts in the main loop.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
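        /*
         * The extend type passed to ext_and_shift_reg() is size with
         * bit 2 set for a signed widen: 0/1/2 select UXTB/UXTH/UXTW
         * and 4/5/6 select SXTB/SXTH/SXTW, so e.g. SSHLL on byte
         * elements (size 0) uses 4, i.e. SXTB.
         */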
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}
/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
= 0; i
< elements
; i
++) {
10316 read_vec_element(s
, tcg_rn
, rn
, i
, size
+1);
10317 handle_shri_with_rndacc(tcg_rd
, tcg_rn
, tcg_round
,
10318 false, true, size
+1, shift
);
10320 tcg_gen_deposit_i64(tcg_final
, tcg_final
, tcg_rd
, esize
* i
, esize
);
10324 write_vec_element(s
, tcg_final
, rd
, 0, MO_64
);
10326 write_vec_element(s
, tcg_final
, rd
, 1, MO_64
);
10329 clear_vec_high(s
, is_q
, rd
);
/* AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
    assert(immh != 0);

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA (accumulate) */
    case 0x04: /* SRSHR / URSHR (rounding) */
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS/ FCVTZU */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
/* Generate code to do a "long" addition or subtraction, ie one done in
 * TCGv_i64 on vector lanes twice the width specified by size.
 */
static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
                          TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
{
    static NeonGenTwo64OpFn * const fns[3][2] = {
        { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
        { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
        { tcg_gen_add_i64, tcg_gen_sub_i64 },
    };
    NeonGenTwo64OpFn *genfn;
    assert(size < 3);

    genfn = fns[size][is_sub];
    genfn(tcg_res, tcg_op1, tcg_op2);
}
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate, a subtracting accumulate,
     * or no accumulate at all?
     */
    switch (opcode) {
    case 5:
    case 8:
    case 9:
        accop = 1;
        break;
    case 10:
    case 11:
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally handle it inline.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
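                /*
                 * Branchless absolute difference: both a - b and b - a
                 * are computed, and movcond picks a - b when a >= b
                 * (GEU for unsigned, GE for signed), otherwise b - a.
                 */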
                break;
            }
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }
        }
    } else {
        /* size 0 or 1, generally helper functions */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                break;
            }
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
}
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
    }
}
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
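    /*
     * Adding 1U << 31, half of the discarded low 32 bits, before taking
     * the high half turns plain truncation into round-to-nearest, as
     * required by the rounding narrow-high ops at this element size.
     */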
    tcg_gen_extrh_i64_i32(res, in);
}
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
    }
    clear_vec_high(s, is_q, rd);
}
/* AdvSIMD three different
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    /* Instructions in this group fall into three basic classes
     * (in each case with the operation working on each element in
     * the input vectors):
     * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
     *     128 bit input)
     * (2) wide 64 x 128 -> 128
     * (3) narrowing 128 x 128 -> 64
     * Here we do initial decode, catch unallocated cases and
     * dispatch to separate functions for each class.
     */
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
        /* 64 x 128 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
        /* 128 x 128 -> 64 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* PMULL, PMULL2 */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        switch (size) {
        case 0: /* PMULL.P8 */
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn. */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_neon_pmull_h);
            break;
        case 3: /* PMULL.P64 */
            if (!dc_isar_feature(aa64_pmull, s)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn. */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_gvec_pmull_q);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    case 9: /* SQDMLAL, SQDMLAL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
    case 13: /* SQDMULL, SQDMULL2 */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
    case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
    case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
    case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
    case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}
/* Logic op (opcode == 3) subgroup of C3.6.16. */
static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    if (!fp_access_check(s)) {
        return;
    }

    switch (size + 4 * is_u) {
    case 0: /* AND */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
        return;
    case 1: /* BIC */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
        return;
    case 2: /* ORR */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
        return;
    case 3: /* ORN */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
        return;
    case 4: /* EOR */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
        return;
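    /*
     * BSL, BIT and BIF are all bitsel(mask, true_val, false_val) with
     * the operands permuted: BSL takes the mask from Vd, while BIT and
     * BIF take it from Vm and differ only in whether Vn supplies the
     * bits selected where the mask is 1 (BIT) or where it is 0 (BIF).
     */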
    case 5: /* BSL bitwise select */
        gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
        return;
    case 6: /* BIT, bitwise insert if true */
        gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
        return;
    case 7: /* BIF, bitwise insert if false */
        gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
        return;

    default:
        g_assert_not_reached();
    }
}
/* Pairwise op subgroup of C3.6.16.
 *
 * This is called directly or via the handle_3same_float for float pairwise
 * operations where the opcode and size are calculated differently.
 */
static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
                                   int size, int rn, int rm, int rd)
{
    TCGv_ptr fpst;
    int pass;

    /* Floating point operations need fpst */
    if (opcode >= 0x58) {
        fpst = fpstatus_ptr(FPST_FPCR);
    } else {
        fpst = NULL;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* These operations work on the concatenated rm:rn, with each pair of
     * adjacent elements being operated on to produce an element in the result.
     */
    if (size == 3) {
        TCGv_i64 tcg_res[2];

        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            int passreg = (pass == 0) ? rn : rm;

            read_vec_element(s, tcg_op1, passreg, 0, MO_64);
            read_vec_element(s, tcg_op2, passreg, 1, MO_64);
            tcg_res[pass] = tcg_temp_new_i64();

            switch (opcode) {
            case 0x17: /* ADDP */
                tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 0x58: /* FMAXNMP */
                gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5a: /* FADDP */
                gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5e: /* FMAXP */
                gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x78: /* FMINNMP */
                gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x7e: /* FMINP */
                gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        }
    } else {
        int maxpass = is_q ? 4 : 2;
        TCGv_i32 tcg_res[4];

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (is_q && (pass & 1)) ? 2 : 0;
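            /*
             * With is_q (maxpass == 4), passes 0-1 take element pairs
             * 0/1 and 2/3 of rn and passes 2-3 the same pairs of rm;
             * without is_q, pass 0 pairs rn's two elements and pass 1
             * pairs rm's, matching the concatenated rm:rn layout.
             */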
            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (opcode) {
            case 0x17: /* ADDP */
            {
                static NeonGenTwoOpFn * const fns[3] = {
                    gen_helper_neon_padd_u8,
                    gen_helper_neon_padd_u16,
                    tcg_gen_add_i32,
                };
                genfn = fns[size];
                break;
            }
            case 0x14: /* SMAXP, UMAXP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
                    { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
                    { tcg_gen_smax_i32, tcg_gen_umax_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x15: /* SMINP, UMINP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
                    { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
                    { tcg_gen_smin_i32, tcg_gen_umin_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            /* The FP operations are all on single floats (32 bit) */
            case 0x58: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5a: /* FADDP */
                gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5e: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x78: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x7e: /* FMINP */
                gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            /* FP ops called directly, otherwise call now */
            if (genfn) {
                genfn(tcg_res[pass], tcg_op1, tcg_op2);
            }
        }

        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
        }
        clear_vec_high(s, is_q, rd);
    }
}
/* Floating point op subgroup of C3.6.16. */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    /* For floating point ops, the U, size[1] and opcode bits
     * together indicate the operation. size[0] indicates single
     * or double.
     */
    int fpopcode = extract32(insn, 11, 5)
        | (extract32(insn, 23, 1) << 5)
        | (extract32(insn, 29, 1) << 6);
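    /*
     * E.g. vector FABD is U == 1, size == <1, sz>, opcode 0x1a, so it
     * arrives here as fpopcode 0x1a | (1 << 5) | (1 << 6) == 0x7a.
     */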
    int is_q = extract32(insn, 30, 1);
    int size = extract32(insn, 22, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    if (size == 1 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    case 0x58: /* FMAXNMP */
    case 0x5a: /* FADDP */
    case 0x5e: /* FMAXP */
    case 0x78: /* FMINNMP */
    case 0x7e: /* FMINP */
        if (size && !is_q) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                               rn, rm, rd);
        return;
    case 0x1b: /* FMULX */
    case 0x1f: /* FRECPS */
    case 0x3f: /* FRSQRTS */
    case 0x5d: /* FACGE */
    case 0x7d: /* FACGT */
    case 0x19: /* FMLA */
    case 0x39: /* FMLS */
    case 0x18: /* FMAXNM */
    case 0x1a: /* FADD */
    case 0x1c: /* FCMEQ */
    case 0x1e: /* FMAX */
    case 0x38: /* FMINNM */
    case 0x3a: /* FSUB */
    case 0x3e: /* FMIN */
    case 0x5b: /* FMUL */
    case 0x5c: /* FCMGE */
    case 0x5f: /* FDIV */
    case 0x7a: /* FABD */
    case 0x7c: /* FCMGT */
        if (!fp_access_check(s)) {
            return;
        }
        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;

    case 0x1d: /* FMLAL  */
    case 0x3d: /* FMLSL  */
    case 0x59: /* FMLAL2 */
    case 0x79: /* FMLSL2 */
        if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
            unallocated_encoding(s);
            return;
        }
        if (fp_access_check(s)) {
            int is_s = extract32(insn, 23, 1);
            int is_2 = extract32(insn, 29, 1);
            int data = (is_2 << 1) | is_s;
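            /*
             * The helper's "data" operand packs the two decode bits:
             * bit 0 set selects the subtracting FMLSL forms and bit 1
             * set selects the FMLAL2/FMLSL2 (upper source half) forms.
             */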
            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm), cpu_env,
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               data, gen_helper_gvec_fmlal_a64);
        }
        return;

    default:
        unallocated_encoding(s);
        return;
    }
}
11097 static void disas_simd_3same_int(DisasContext
*s
, uint32_t insn
)
11099 int is_q
= extract32(insn
, 30, 1);
11100 int u
= extract32(insn
, 29, 1);
11101 int size
= extract32(insn
, 22, 2);
11102 int opcode
= extract32(insn
, 11, 5);
11103 int rm
= extract32(insn
, 16, 5);
11104 int rn
= extract32(insn
, 5, 5);
11105 int rd
= extract32(insn
, 0, 5);
11110 case 0x13: /* MUL, PMUL */
11111 if (u
&& size
!= 0) {
11112 unallocated_encoding(s
);
11116 case 0x0: /* SHADD, UHADD */
11117 case 0x2: /* SRHADD, URHADD */
11118 case 0x4: /* SHSUB, UHSUB */
11119 case 0xc: /* SMAX, UMAX */
11120 case 0xd: /* SMIN, UMIN */
11121 case 0xe: /* SABD, UABD */
11122 case 0xf: /* SABA, UABA */
11123 case 0x12: /* MLA, MLS */
11125 unallocated_encoding(s
);
11129 case 0x16: /* SQDMULH, SQRDMULH */
11130 if (size
== 0 || size
== 3) {
11131 unallocated_encoding(s
);
11136 if (size
== 3 && !is_q
) {
11137 unallocated_encoding(s
);
11143 if (!fp_access_check(s
)) {
11148 case 0x01: /* SQADD, UQADD */
11150 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_uqadd_qc
, size
);
11152 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sqadd_qc
, size
);
11155 case 0x05: /* SQSUB, UQSUB */
11157 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_uqsub_qc
, size
);
11159 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sqsub_qc
, size
);
11162 case 0x08: /* SSHL, USHL */
11164 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_ushl
, size
);
11166 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sshl
, size
);
11169 case 0x0c: /* SMAX, UMAX */
11171 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_umax
, size
);
11173 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_smax
, size
);
11176 case 0x0d: /* SMIN, UMIN */
11178 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_umin
, size
);
11180 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_smin
, size
);
11183 case 0xe: /* SABD, UABD */
11185 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_uabd
, size
);
11187 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sabd
, size
);
11190 case 0xf: /* SABA, UABA */
11192 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_uaba
, size
);
11194 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_saba
, size
);
11197 case 0x10: /* ADD, SUB */
11199 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_sub
, size
);
11201 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_add
, size
);
11204 case 0x13: /* MUL, PMUL */
11205 if (!u
) { /* MUL */
11206 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_mul
, size
);
11207 } else { /* PMUL */
11208 gen_gvec_op3_ool(s
, is_q
, rd
, rn
, rm
, 0, gen_helper_gvec_pmul_b
);
11211 case 0x12: /* MLA, MLS */
11213 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_mls
, size
);
11215 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_mla
, size
);
11218 case 0x16: /* SQDMULH, SQRDMULH */
11220 static gen_helper_gvec_3_ptr
* const fns
[2][2] = {
11221 { gen_helper_neon_sqdmulh_h
, gen_helper_neon_sqrdmulh_h
},
11222 { gen_helper_neon_sqdmulh_s
, gen_helper_neon_sqrdmulh_s
},
11224 gen_gvec_op3_qc(s
, is_q
, rd
, rn
, rm
, fns
[size
- 1][u
]);
11228 if (!u
) { /* CMTST */
11229 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_cmtst
, size
);
11233 cond
= TCG_COND_EQ
;
11235 case 0x06: /* CMGT, CMHI */
11236 cond
= u
? TCG_COND_GTU
: TCG_COND_GT
;
11238 case 0x07: /* CMGE, CMHS */
11239 cond
= u
? TCG_COND_GEU
: TCG_COND_GE
;
11241 tcg_gen_gvec_cmp(cond
, size
, vec_full_reg_offset(s
, rd
),
11242 vec_full_reg_offset(s
, rn
),
11243 vec_full_reg_offset(s
, rm
),
11244 is_q
? 16 : 8, vec_full_reg_size(s
));
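    /*
     * The remaining opcodes (halving add/sub, rounding and saturating
     * shifts) have no gvec expansion; they are handled element by
     * element through the Neon helpers below.
     */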
    if (size == 3) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);

            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
    } else {
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            NeonGenTwoOpEnvFn *genenvfn = NULL;

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (opcode) {
            case 0x0: /* SHADD, UHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x2: /* SRHADD, URHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x4: /* SHSUB, UHSUB */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x9: /* SQSHL, UQSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xa: /* SRSHL, URSHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xb: /* SQRSHL, UQRSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            default:
                g_assert_not_reached();
            }

            if (genenvfn) {
                genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
            } else {
                genfn(tcg_res, tcg_op1, tcg_op2);
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
        }
    }
    clear_vec_high(s, is_q, rd);
}
/* AdvSIMD three same
 *  31  30  29  28       24 23  22  21 20  16 15    11  10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 5);

    switch (opcode) {
    case 0x3: /* logic ops */
        disas_simd_3same_logic(s, insn);
        break;
    case 0x17: /* ADDP */
    case 0x14: /* SMAXP, UMAXP */
    case 0x15: /* SMINP, UMINP */
    {
        /* Pairwise operations */
        int is_q = extract32(insn, 30, 1);
        int u = extract32(insn, 29, 1);
        int size = extract32(insn, 22, 2);
        int rm = extract32(insn, 16, 5);
        int rn = extract32(insn, 5, 5);
        int rd = extract32(insn, 0, 5);
        if (opcode == 0x17) {
            if (u || (size == 3 && !is_q)) {
                unallocated_encoding(s);
                return;
            }
        } else {
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
        }
        handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
        break;
    }
    case 0x18 ... 0x31:
        /* floating point ops, sz[1] and U are part of opcode */
        disas_simd_3same_float(s, insn);
        break;
    default:
        disas_simd_3same_int(s, insn);
        break;
    }
}
/*
 * Advanced SIMD three same (ARMv8.2 FP16 variants)
 *
 *  31  30  29  28       24 23 22 21 20  16 15 14 13    11 10  9    5 4    0
 * +---+---+---+-----------+---+-----+------+-----+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+---+-----+------+-----+--------+---+------+------+
 *
 * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
 * (register), FACGE, FABD, FCMGT (register) and FACGT.
 */
static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 3);
    int u = extract32(insn, 29, 1);
    int a = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    /*
     * For these floating point ops, the U, a and opcode bits
     * together indicate the operation.
     */
    int fpopcode = opcode | (a << 3) | (u << 4);
    int datasize = is_q ? 128 : 64;
    int elements = datasize / 16;
    bool pairwise;
    TCGv_ptr fpst;
    int pass;

    switch (fpopcode) {
    case 0x0: /* FMAXNM */
    case 0x1: /* FMLA */
    case 0x2: /* FADD */
    case 0x3: /* FMULX */
    case 0x4: /* FCMEQ */
    case 0x6: /* FMAX */
    case 0x7: /* FRECPS */
    case 0x8: /* FMINNM */
    case 0x9: /* FMLS */
    case 0xa: /* FSUB */
    case 0xe: /* FMIN */
    case 0xf: /* FRSQRTS */
    case 0x13: /* FMUL */
    case 0x14: /* FCMGE */
    case 0x15: /* FACGE */
    case 0x17: /* FDIV */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT */
    case 0x1d: /* FACGT */
        pairwise = false;
        break;
    case 0x10: /* FMAXNMP */
    case 0x12: /* FADDP */
    case 0x16: /* FMAXP */
    case 0x18: /* FMINNMP */
    case 0x1e: /* FMINP */
        pairwise = true;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);

    if (pairwise) {
        int maxpass = is_q ? 8 : 4;
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res[8];

        for (pass = 0; pass < maxpass; pass++) {
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (pass << 1) & (maxpass - 1);
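            /*
             * (pass << 1) & (maxpass - 1) wraps from one source to the
             * other: for is_q (maxpass == 8), passes 0-3 read element
             * pairs 0/1, 2/3, 4/5, 6/7 of rn and passes 4-7 the same
             * pairs of rm.
             */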
            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (fpopcode) {
            case 0x10: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x12: /* FADDP */
                gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x16: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x1e: /* FMINP */
                gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
        }
    } else {
        for (pass = 0; pass < elements; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);

            switch (fpopcode) {
            case 0x0: /* FMAXNM */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0x2: /* FADD */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3: /* FMULX */
                gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x4: /* FCMEQ */
                gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x6: /* FMAX */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7: /* FRECPS */
                gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x8: /* FMINNM */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x9: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
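                /*
                 * Flipping bit 15 negates the half-precision multiplicand,
                 * so FMLS is computed as fma(-op1, op2, acc) and keeps
                 * the single-rounding fused semantics.
                 */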
11555 read_vec_element_i32(s
, tcg_res
, rd
, pass
, MO_16
);
11556 gen_helper_advsimd_muladdh(tcg_res
, tcg_op1
, tcg_op2
, tcg_res
,
11559 case 0xa: /* FSUB */
11560 gen_helper_advsimd_subh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
11562 case 0xe: /* FMIN */
11563 gen_helper_advsimd_minh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
11565 case 0xf: /* FRSQRTS */
11566 gen_helper_rsqrtsf_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
11568 case 0x13: /* FMUL */
11569 gen_helper_advsimd_mulh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
11571 case 0x14: /* FCMGE */
11572 gen_helper_advsimd_cge_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
11574 case 0x15: /* FACGE */
11575 gen_helper_advsimd_acge_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
11577 case 0x17: /* FDIV */
11578 gen_helper_advsimd_divh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
11580 case 0x1a: /* FABD */
11581 gen_helper_advsimd_subh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
11582 tcg_gen_andi_i32(tcg_res
, tcg_res
, 0x7fff);
11584 case 0x1c: /* FCMGT */
11585 gen_helper_advsimd_cgt_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
11587 case 0x1d: /* FACGT */
11588 gen_helper_advsimd_acgt_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
11591 g_assert_not_reached();
11594 write_vec_element_i32(s
, tcg_res
, rd
, pass
, MO_16
);
11598 clear_vec_high(s
, is_q
, rd
);
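/*
 * Note on the pairwise (FxxxP) passes above: with maxpass = 8 (Q form),
 * passes 0..3 read adjacent element pairs from Rn and passes 4..7 read
 * from Rm, since passelt = (pass << 1) & (maxpass - 1) wraps back to 0
 * at the halfway point. Result elements 0..3 thus come from Rn and
 * 4..7 from Rm, matching the architected pairwise operand layout.
 */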
/* AdvSIMD three same extra
 *  31   30  29 28       24 23  22  21 20  16 15 14    11 10 9  5 4  0
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 | Q | U | 0 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    bool feature;
    int rot;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    case 0x02: /* SDOT (vector) */
    case 0x12: /* UDOT (vector) */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_dp, s);
        break;
    case 0x03: /* USDOT */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_i8mm, s);
        break;
    case 0x04: /* SMMLA */
    case 0x14: /* UMMLA */
    case 0x05: /* USMMLA */
        if (!is_q || size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_i8mm, s);
        break;
    case 0x18: /* FCMLA, #0 */
    case 0x19: /* FCMLA, #90 */
    case 0x1a: /* FCMLA, #180 */
    case 0x1b: /* FCMLA, #270 */
    case 0x1c: /* FCADD, #90 */
    case 0x1e: /* FCADD, #270 */
        if (size == 0
            || (size == 1 && !dc_isar_feature(aa64_fp16, s))
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_fcma, s);
        break;
    case 0x1d: /* BFMMLA */
        if (size != MO_16 || !is_q) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_bf16, s);
        break;
    case 0x1f:
        switch (size) {
        case 1: /* BFDOT */
        case 3: /* BFMLAL{B,T} */
            feature = dc_isar_feature(aa64_bf16, s);
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x0: /* SQRDMLAH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
        return;

    case 0x1: /* SQRDMLSH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
        return;

    case 0x2: /* SDOT / UDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
                         u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
        return;

    case 0x3: /* USDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
        return;

    case 0x04: /* SMMLA, UMMLA */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
                         u ? gen_helper_gvec_ummla_b
                         : gen_helper_gvec_smmla_b);
        return;
    case 0x05: /* USMMLA */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
        return;

    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
        rot = extract32(opcode, 0, 2);
        switch (size) {
        case 1:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
                              gen_helper_gvec_fcmlah);
            break;
        case 2:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlas);
            break;
        case 3:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlad);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        rot = extract32(opcode, 1, 1);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddh);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcadds);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddd);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xd: /* BFMMLA */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
        return;
    case 0xf:
        switch (size) {
        case 1: /* BFDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
                             gen_helper_gvec_bfdot);
            break;
        case 3: /* BFMLAL{B,T} */
            gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
                              gen_helper_gvec_bfmlal);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    default:
        g_assert_not_reached();
    }
}
static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
                                  int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are widening (so each size element
     * in the source becomes a 2*size element in the destination.
     * The only instruction like this is FCVTL.
     */
    int pass;

    if (size == 3) {
        /* 32 -> 64 bit fp conversion */
        TCGv_i64 tcg_res[2];
        int srcelt = is_q ? 2 : 0;

        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
            gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
        }
        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        }
    } else {
        /* 16 -> 32 bit fp conversion */
        int srcelt = is_q ? 4 : 0;
        TCGv_i32 tcg_res[4];
        TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 ahp = get_ahp_flag();

        for (pass = 0; pass < 4; pass++) {
            tcg_res[pass] = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
            gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
                                           fpst, ahp);
        }
        for (pass = 0; pass < 4; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
        }
    }
}
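/*
 * Worked example: FCVTL2 Vd.4S, Vn.8H has is_q set, so srcelt = 4 and
 * the four f16 sources come from the high half of Vn. The AHP flag
 * fetched via get_ahp_flag() tells the conversion helper whether the
 * alternative half-precision format (no infinities/NaNs) is in effect.
 */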
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz;
    int dsize = is_q ? 128 : 64;
    int i;

    if (opsz >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Special case bytes, use bswap op on each group of elements */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        int revmask = (1 << grp_size) - 1;
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd[2];

        for (i = 0; i < 2; i++) {
            tcg_rd[i] = tcg_temp_new_i64();
            tcg_gen_movi_i64(tcg_rd[i], 0);
        }

        for (i = 0; i < elements; i++) {
            int e_rev = (i & 0xf) ^ revmask;
            int w = (e_rev * esize) / 64;
            int o = (e_rev * esize) % 64;

            read_vec_element(s, tcg_rn, rn, i, size);
            tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize);
        }

        for (i = 0; i < 2; i++) {
            write_vec_element(s, tcg_rd[i], rd, i, MO_64);
        }
        clear_vec_high(s, true, rd);
    }
}
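/*
 * Worked example for the deposit path above: REV32 Vd.8H, Vn.8H has
 * op = 1 and size = 1, so grp_size = 1 and revmask = 1. Each 16-bit
 * element index is XORed with 1, swapping the two halfwords within
 * every 32-bit group as the elements are deposited into tcg_rd[].
 */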
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /* Implement the pairwise operations from 2-misc:
     * SADDLP, UADDLP, SADALP, UADALP.
     * These all add pairs of elements in the input to produce a
     * double-width result element in the output (possibly accumulating).
     */
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 op */
        MemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }
        }
    } else {
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOne64OpFn *genfn;
            static NeonGenOne64OpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8,  gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
        }
    }
    if (!is_q) {
        tcg_res[1] = tcg_constant_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
    }
}
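/*
 * Example: UADDLP Vd.2D, Vn.4S takes the 32 + 32 -> 64 path above
 * (size == 2, u set), adding elements 0+1 and 2+3 of Vn. UADALP is
 * identical except that accum is set, so the previous contents of Vd
 * are added into the pairwise sums.
 */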
static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
{
    /* Implement SHLL and SHLL2 */
    int pass;
    int part = is_q ? 2 : 0;
    TCGv_i64 tcg_res[2];

    for (pass = 0; pass < 2; pass++) {
        static NeonGenWidenFn * const widenfns[3] = {
            gen_helper_neon_widen_u8,
            gen_helper_neon_widen_u16,
            tcg_gen_extu_i32_i64,
        };
        NeonGenWidenFn *widenfn = widenfns[size];
        TCGv_i32 tcg_op = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
        tcg_res[pass] = tcg_temp_new_i64();
        widenfn(tcg_res[pass], tcg_op);
        tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
    }
}
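/*
 * Example: SHLL Vd.8H, Vn.8B, #8 widens each byte of the low half of
 * Vn (part == 0) and then shifts left by the source element size
 * (8 << size), so the shift amount is implied by the element width
 * rather than being a free immediate.
 */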
/* AdvSIMD two reg misc
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool need_fpstatus = false;
    int rmode = -1;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT */
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
        return;
    case 0x4: /* CLS, CLZ */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SADDLP, UADDLP */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_shll(s, is_q, size, rn, rd);
        return;
    case 0xa: /* CMLT */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x3: /* SUQADD, USQADD */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
        return;
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc ... 0xf:
    case 0x16 ... 0x1f:
    {
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        int is_double = extract32(size, 0, 1);
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = is_double ? 3 : 2;
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d) ? true : false;
            int elements = is_double ? 2 : is_q ? 4 : 2;
            if (is_double && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
            return;
        }
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x7f: /* FSQRT */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            need_fpstatus = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x5c: /* FCVTAU */
        case 0x1c: /* FCVTAS */
            need_fpstatus = true;
            rmode = FPROUNDING_TIEAWAY;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x3c: /* URECPE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x3d: /* FRECPE */
        case 0x7d: /* FRSQRTE */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x16: /* FCVTN, FCVTN2 */
            /* handle_2misc_narrow does a 2*size -> size operation, but these
             * instructions encode the source size rather than dest size.
             */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x36: /* BFCVTN, BFCVTN2 */
            if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
            return;
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            /* fall through */
        case 0x59: /* FRINTX */
        case 0x79: /* FRINTI */
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x58: /* FRINTA */
            rmode = FPROUNDING_TIEAWAY;
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x7c: /* URSQRTE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1e: /* FRINT32Z */
        case 0x1f: /* FRINT64Z */
            rmode = FPROUNDING_ZERO;
            /* fall through */
        case 0x5e: /* FRINT32X */
        case 0x5f: /* FRINT64X */
            need_fpstatus = true;
            if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
                unallocated_encoding(s);
                return;
            }
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_fpstatus || rmode >= 0) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
    } else {
        tcg_fpstatus = NULL;
    }
    if (rmode >= 0) {
        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
    }

    switch (opcode) {
    case 0x5:
        if (u && size == 0) { /* NOT */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
            return;
        }
        break;
    case 0x8: /* CMGT, CMGE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
        }
        return;
    case 0x9: /* CMEQ, CMLE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
        }
        return;
    case 0xa: /* CMLT */
        gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
        return;
    case 0xb:
        if (u) { /* ABS, NEG */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
        }
        return;
    }

    if (size == 3) {
        /* All 64-bit element operations can be shared with scalar 2misc */
        int pass;

        /* Coverity claims (size == 3 && !is_q) has been eliminated
         * from all paths leading to here.
         */
        tcg_debug_assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
                            tcg_rmode, tcg_fpstatus);

            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
    } else {
        int pass;

        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0x4: /* CLS */
                    if (u) {
                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
                    } else {
                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                    if (u) {
                        gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
                    } else {
                        gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_helper_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_helper_vfp_negs(tcg_res, tcg_op);
                    break;
                case 0x7f: /* FSQRT */
                    gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
                    break;
                case 0x1a: /* FCVTNS */
                case 0x1b: /* FCVTMS */
                case 0x1c: /* FCVTAS */
                case 0x3a: /* FCVTPS */
                case 0x3b: /* FCVTZS */
                    gen_helper_vfp_tosls(tcg_res, tcg_op,
                                         tcg_constant_i32(0), tcg_fpstatus);
                    break;
                case 0x5a: /* FCVTNU */
                case 0x5b: /* FCVTMU */
                case 0x5c: /* FCVTAU */
                case 0x7a: /* FCVTPU */
                case 0x7b: /* FCVTZU */
                    gen_helper_vfp_touls(tcg_res, tcg_op,
                                         tcg_constant_i32(0), tcg_fpstatus);
                    break;
                case 0x18: /* FRINTN */
                case 0x19: /* FRINTM */
                case 0x38: /* FRINTP */
                case 0x39: /* FRINTZ */
                case 0x58: /* FRINTA */
                case 0x79: /* FRINTI */
                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x59: /* FRINTX */
                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x7c: /* URSQRTE */
                    gen_helper_rsqrte_u32(tcg_res, tcg_op);
                    break;
                case 0x1e: /* FRINT32Z */
                case 0x5e: /* FRINT32X */
                    gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x1f: /* FRINT64Z */
                case 0x5f: /* FRINT64X */
                    gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* For these two insns size is part of the opcode specifier
                     * (handled earlier); they always operate on byte elements.
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                {
                    NeonGenOneOpEnvFn *genfn;
                    static NeonGenOneOpEnvFn * const fns[2][2] = {
                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                    };
                    genfn = fns[size][u];
                    genfn(tcg_res, cpu_env, tcg_op);
                    break;
                }
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
        }
    }
    clear_vec_high(s, is_q, rd);

    if (tcg_rmode) {
        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
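/*
 * The rmode extraction above relies on the FCVT* opcode layout:
 * opcode[5] | opcode[0] << 1 maps directly onto the FPRounding values
 * FPROUNDING_TIEEVEN (0), POSINF (1), NEGINF (2) and ZERO (3), e.g.
 * FCVTNS (0x1a) -> 0, FCVTPS (0x3a) -> 1, FCVTMS (0x1b) -> 2 and
 * FCVTZS (0x3b) -> 3.
 */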
/* AdvSIMD [scalar] two register miscellaneous (FP16)
 *
 *   31  30  29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 *   mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
 *   val:  0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
 *
 * This actually covers two groups where scalar access is governed by
 * bit 28. A bunch of the instructions (float to integral) only exist
 * in the vector form and are un-allocated for the scalar decode. Also
 * in the scalar decode Q is always 1.
 */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
{
    int fpop, opcode, a, u;
    int rn, rd;
    bool is_q;
    bool is_scalar;
    bool only_in_vector = false;

    int pass;
    TCGv_i32 tcg_rmode = NULL;
    TCGv_ptr tcg_fpstatus = NULL;
    bool need_fpst = true;
    int rmode = -1;

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    a = extract32(insn, 23, 1);
    u = extract32(insn, 29, 1);
    is_scalar = extract32(insn, 28, 1);
    is_q = extract32(insn, 30, 1);

    opcode = extract32(insn, 12, 5);
    fpop = deposit32(opcode, 5, 1, a);
    fpop = deposit32(fpop, 6, 1, u);

    switch (fpop) {
    case 0x1d: /* SCVTF */
    case 0x5d: /* UCVTF */
    {
        int elements;

        if (is_scalar) {
            elements = 1;
        } else {
            elements = (is_q ? 8 : 4);
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
        return;
    }
    break;
    case 0x2c: /* FCMGT (zero) */
    case 0x2d: /* FCMEQ (zero) */
    case 0x2e: /* FCMLT (zero) */
    case 0x6c: /* FCMGE (zero) */
    case 0x6d: /* FCMLE (zero) */
        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
        return;
    case 0x3d: /* FRECPE */
    case 0x3f: /* FRECPX */
        break;
    case 0x18: /* FRINTN */
        only_in_vector = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x19: /* FRINTM */
        only_in_vector = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x38: /* FRINTP */
        only_in_vector = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x39: /* FRINTZ */
        only_in_vector = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x58: /* FRINTA */
        only_in_vector = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x59: /* FRINTX */
    case 0x79: /* FRINTI */
        only_in_vector = true;
        /* current rounding mode */
        break;
    case 0x1a: /* FCVTNS */
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x1b: /* FCVTMS */
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x1c: /* FCVTAS */
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x3a: /* FCVTPS */
        rmode = FPROUNDING_POSINF;
        break;
    case 0x3b: /* FCVTZS */
        rmode = FPROUNDING_ZERO;
        break;
    case 0x5a: /* FCVTNU */
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x5b: /* FCVTMU */
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x5c: /* FCVTAU */
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x7a: /* FCVTPU */
        rmode = FPROUNDING_POSINF;
        break;
    case 0x7b: /* FCVTZU */
        rmode = FPROUNDING_ZERO;
        break;
    case 0x2f: /* FABS */
    case 0x6f: /* FNEG */
        need_fpst = false;
        break;
    case 0x7d: /* FRSQRTE */
    case 0x7f: /* FSQRT (vector) */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    /* Check additional constraints for the scalar encoding */
    if (is_scalar) {
        if (!is_q) {
            unallocated_encoding(s);
            return;
        }
        /* FRINTxx is only in the vector form */
        if (only_in_vector) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rmode >= 0 || need_fpst) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
    }

    if (rmode >= 0) {
        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
    }

    if (is_scalar) {
        TCGv_i32 tcg_op = read_fp_hreg(s, rn);
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);
    } else {
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
/* AdvSIMD scalar x indexed element
 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * AdvSIMD vector x indexed element
 *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 */
static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
    /* This encoding has two kinds of instruction:
     *  normal, where we perform elt x idxelt => elt for each
     *     element in the vector
     *  long, where we perform elt x idxelt and generate a result of
     *     double the width of the input element
     * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
     */
    bool is_scalar = extract32(insn, 28, 1);
    bool is_q = extract32(insn, 30, 1);
    bool u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int l = extract32(insn, 21, 1);
    int m = extract32(insn, 20, 1);
    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
    int rm = extract32(insn, 16, 4);
    int opcode = extract32(insn, 12, 4);
    int h = extract32(insn, 11, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool is_long = false;
    int is_fp = 0;
    bool is_fp16 = false;
    int index;
    TCGv_ptr fpst;

    switch (16 * u + opcode) {
    case 0x08: /* MUL */
    case 0x10: /* MLA */
    case 0x14: /* MLS */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x02: /* SMLAL, SMLAL2 */
    case 0x12: /* UMLAL, UMLAL2 */
    case 0x06: /* SMLSL, SMLSL2 */
    case 0x16: /* UMLSL, UMLSL2 */
    case 0x0a: /* SMULL, SMULL2 */
    case 0x1a: /* UMULL, UMULL2 */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        is_long = true;
        break;
    case 0x03: /* SQDMLAL, SQDMLAL2 */
    case 0x07: /* SQDMLSL, SQDMLSL2 */
    case 0x0b: /* SQDMULL, SQDMULL2 */
        is_long = true;
        break;
    case 0x0c: /* SQDMULH */
    case 0x0d: /* SQRDMULH */
        break;
    case 0x01: /* FMLA */
    case 0x05: /* FMLS */
    case 0x09: /* FMUL */
    case 0x19: /* FMULX */
        is_fp = 1;
        break;
    case 0x1d: /* SQRDMLAH */
    case 0x1f: /* SQRDMLSH */
        if (!dc_isar_feature(aa64_rdm, s)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x0e: /* SDOT */
    case 0x1e: /* UDOT */
        if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x0f:
        switch (size) {
        case 0: /* SUDOT */
        case 2: /* USDOT */
            if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
                unallocated_encoding(s);
                return;
            }
            size = MO_32;
            break;
        case 1: /* BFDOT */
            if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
                unallocated_encoding(s);
                return;
            }
            size = MO_32;
            break;
        case 3: /* BFMLAL{B,T} */
            if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
                unallocated_encoding(s);
                return;
            }
            /* can't set is_fp without other incorrect size checks */
            size = MO_16;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
            unallocated_encoding(s);
            return;
        }
        is_fp = 2;
        break;
    case 0x00: /* FMLAL */
    case 0x04: /* FMLSL */
    case 0x18: /* FMLAL2 */
    case 0x1c: /* FMLSL2 */
        if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
            unallocated_encoding(s);
            return;
        }
        size = MO_16;
        /* is_fp, but we pass cpu_env not fp_status. */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    switch (is_fp) {
    case 1: /* normal fp */
        /* convert insn encoded size to MemOp size */
        switch (size) {
        case 0: /* half-precision */
            size = MO_16;
            is_fp16 = true;
            break;
        case MO_32: /* single precision */
        case MO_64: /* double precision */
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    case 2: /* complex fp */
        /* Each indexable element is a complex pair. */
        size += 1;
        switch (size) {
        case MO_32:
            if (h && !is_q) {
                unallocated_encoding(s);
                return;
            }
            is_fp16 = true;
            break;
        case MO_64:
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    default: /* integer */
        switch (size) {
        case MO_8:
        case MO_64:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    /* Given MemOp size, adjust register and indexing. */
    switch (size) {
    case MO_16:
        index = h << 2 | l << 1 | m;
        break;
    case MO_32:
        index = h << 1 | l;
        rm |= m << 4;
        break;
    case MO_64:
        if (l || !is_q) {
            unallocated_encoding(s);
            return;
        }
        index = h;
        rm |= m << 4;
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fp) {
        fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    } else {
        fpst = NULL;
    }

    switch (16 * u + opcode) {
    case 0x0e: /* SDOT */
    case 0x1e: /* UDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                         u ? gen_helper_gvec_udot_idx_b
                         : gen_helper_gvec_sdot_idx_b);
        return;
    case 0x0f:
        switch (extract32(insn, 22, 2)) {
        case 0: /* SUDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                             gen_helper_gvec_sudot_idx_b);
            return;
        case 1: /* BFDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                             gen_helper_gvec_bfdot_idx);
            return;
        case 2: /* USDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                             gen_helper_gvec_usdot_idx_b);
            return;
        case 3: /* BFMLAL{B,T} */
            gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
                              gen_helper_gvec_bfmlal_idx);
            return;
        }
        g_assert_not_reached();
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
    {
        int rot = extract32(insn, 13, 2);
        int data = (index << 2) | rot;
        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, rd), fpst,
                           is_q ? 16 : 8, vec_full_reg_size(s), data,
                           size == MO_64
                           ? gen_helper_gvec_fcmlas_idx
                           : gen_helper_gvec_fcmlah_idx);
    }
        return;

    case 0x00: /* FMLAL */
    case 0x04: /* FMLSL */
    case 0x18: /* FMLAL2 */
    case 0x1c: /* FMLSL2 */
    {
        int is_s = extract32(opcode, 2, 1);
        int is_2 = u;
        int data = (index << 2) | (is_2 << 1) | is_s;
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm), cpu_env,
                           is_q ? 16 : 8, vec_full_reg_size(s),
                           data, gen_helper_gvec_fmlal_idx_a64);
    }
        return;

    case 0x08: /* MUL */
        if (!is_long && !is_scalar) {
            static gen_helper_gvec_3 * const fns[3] = {
                gen_helper_gvec_mul_idx_h,
                gen_helper_gvec_mul_idx_s,
                gen_helper_gvec_mul_idx_d,
            };
            tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               index, fns[size - 1]);
            return;
        }
        break;

    case 0x10: /* MLA */
        if (!is_long && !is_scalar) {
            static gen_helper_gvec_4 * const fns[3] = {
                gen_helper_gvec_mla_idx_h,
                gen_helper_gvec_mla_idx_s,
                gen_helper_gvec_mla_idx_d,
            };
            tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               vec_full_reg_offset(s, rd),
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               index, fns[size - 1]);
            return;
        }
        break;

    case 0x14: /* MLS */
        if (!is_long && !is_scalar) {
            static gen_helper_gvec_4 * const fns[3] = {
                gen_helper_gvec_mls_idx_h,
                gen_helper_gvec_mls_idx_s,
                gen_helper_gvec_mls_idx_d,
            };
            tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               vec_full_reg_offset(s, rd),
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               index, fns[size - 1]);
            return;
        }
        break;
    }

    if (size == 3) {
        TCGv_i64 tcg_idx = tcg_temp_new_i64();
        int pass;

        assert(is_fp && is_q && !is_long);

        read_vec_element(s, tcg_idx, rm, index, MO_64);

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            switch (16 * u + opcode) {
            case 0x05: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op, tcg_op);
                /* fall through */
            case 0x01: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
                break;
            case 0x09: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            case 0x19: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }

        clear_vec_high(s, !is_scalar, rd);
    } else if (!is_long) {
        /* 32 bit floating point, or 16 or 32 bit integer.
         * For the 16 bit scalar case we use the usual Neon helpers and
         * rely on the fact that 0 op 0 == 0 with no side effects.
         */
        TCGv_i32 tcg_idx = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        read_vec_element_i32(s, tcg_idx, rm, index, size);

        if (size == 1 && !is_scalar) {
            /* The simplest way to handle the 16x16 indexed ops is to duplicate
             * the index into both halves of the 32 bit tcg_idx and then use
             * the usual Neon helpers.
             */
            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
        }

        for (pass = 0; pass < maxpasses; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);

            switch (16 * u + opcode) {
            case 0x08: /* MUL */
            case 0x10: /* MLA */
            case 0x14: /* MLS */
            {
                static NeonGenTwoOpFn * const fns[2][2] = {
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                NeonGenTwoOpFn *genfn;
                bool is_sub = opcode == 0x4;

                if (size == 1) {
                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
                } else {
                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
                }
                if (opcode == 0x8) {
                    break;
                }
                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
                genfn = fns[size - 1][is_sub];
                genfn(tcg_res, tcg_op, tcg_res);
                break;
            }
            case 0x05: /* FMLS */
            case 0x01: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                switch (size) {
                case 1:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for fused
                         * multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
                    }
                    if (is_scalar) {
                        gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
                                                   tcg_res, fpst);
                    } else {
                        gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
                                                    tcg_res, fpst);
                    }
                    break;
                case 2:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for
                         * fused multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
                    }
                    gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
                                           tcg_res, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x09: /* FMUL */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulh(tcg_res, tcg_op,
                                                tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mul2h(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x19: /* FMULX */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulxh(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
                                                  tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x0c: /* SQDMULH */
                if (size == 1) {
                    gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                }
                break;
            case 0x0d: /* SQRDMULH */
                if (size == 1) {
                    gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                }
                break;
            case 0x1d: /* SQRDMLAH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            case 0x1f: /* SQRDMLSH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }

        clear_vec_high(s, is_q, rd);
    } else {
        /* long ops: 16x16->32 or 32x32->64 */
        TCGv_i64 tcg_res[2];
        int pass;
        bool satop = extract32(opcode, 0, 1);
        MemOp memop = MO_32;

        if (satop || !u) {
            memop |= MO_SIGN;
        }

        if (size == 2) {
            TCGv_i64 tcg_idx = tcg_temp_new_i64();

            read_vec_element(s, tcg_idx, rm, index, memop);

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i64 tcg_op = tcg_temp_new_i64();
                TCGv_i64 tcg_passres;
                int passelt;

                if (is_scalar) {
                    passelt = 0;
                } else {
                    passelt = pass + (is_q * 2);
                }

                read_vec_element(s, tcg_op, rn, passelt, memop);

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);

                if (satop) {
                    /* saturating, doubling */
                    gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            clear_vec_high(s, !is_scalar, rd);
        } else {
            TCGv_i32 tcg_idx = tcg_temp_new_i32();

            assert(size == 1);
            read_vec_element_i32(s, tcg_idx, rm, index, size);

            if (!is_scalar) {
                /* The simplest way to handle the 16x16 indexed ops is to
                 * duplicate the index into both halves of the 32 bit tcg_idx
                 * and then use the usual Neon helpers.
                 */
                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
            }

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i32 tcg_op = tcg_temp_new_i32();
                TCGv_i64 tcg_passres;

                if (is_scalar) {
                    read_vec_element_i32(s, tcg_op, rn, pass, size);
                } else {
                    read_vec_element_i32(s, tcg_op, rn,
                                         pass + (is_q * 2), MO_32);
                }

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                if (memop & MO_SIGN) {
                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
                }
                if (satop) {
                    gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
            }
        }

        if (is_scalar) {
            tcg_res[1] = tcg_constant_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        }
    }
}
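/*
 * For the long ops above, note the source element selection: the INSN2
 * forms (is_q set) read their inputs from the high half of Rn, via
 * passelt = pass + (is_q * 2), so SMLAL operates on elements 0..1 (or
 * 0..3 for 16-bit) and SMLAL2 on the upper half, both producing a full
 * 128-bit double-width result.
 */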
/* Crypto AES
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_aes(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int decrypt;
    gen_helper_gvec_2 *genfn2 = NULL;
    gen_helper_gvec_3 *genfn3 = NULL;

    if (!dc_isar_feature(aa64_aes, s) || size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x4: /* AESE */
        decrypt = 0;
        genfn3 = gen_helper_crypto_aese;
        break;
    case 0x6: /* AESMC */
        decrypt = 0;
        genfn2 = gen_helper_crypto_aesmc;
        break;
    case 0x5: /* AESD */
        decrypt = 1;
        genfn3 = gen_helper_crypto_aese;
        break;
    case 0x7: /* AESIMC */
        decrypt = 1;
        genfn2 = gen_helper_crypto_aesmc;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
    if (genfn2) {
        gen_gvec_op2_ool(s, true, rd, rn, decrypt, genfn2);
    } else {
        gen_gvec_op3_ool(s, true, rd, rd, rn, decrypt, genfn3);
    }
}
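/*
 * AESD and AESIMC reuse the AESE/AESMC helpers: the 'decrypt' value is
 * passed through as the out-of-line helper's data argument, and the
 * helper selects the forward or inverse transformation based on it.
 */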
/* Crypto three-reg SHA
 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
 * +-----------------+------+---+------+---+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
 * +-----------------+------+---+------+---+--------+-----+------+------+
 */
static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 3);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    gen_helper_gvec_3 *genfn;
    bool feature;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1C */
        genfn = gen_helper_crypto_sha1c;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 1: /* SHA1P */
        genfn = gen_helper_crypto_sha1p;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 2: /* SHA1M */
        genfn = gen_helper_crypto_sha1m;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 3: /* SHA1SU0 */
        genfn = gen_helper_crypto_sha1su0;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 4: /* SHA256H */
        genfn = gen_helper_crypto_sha256h;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 5: /* SHA256H2 */
        genfn = gen_helper_crypto_sha256h2;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 6: /* SHA256SU1 */
        genfn = gen_helper_crypto_sha256su1;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
    gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
}
/* Crypto two-reg SHA
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    gen_helper_gvec_2 *genfn;
    bool feature;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1H */
        feature = dc_isar_feature(aa64_sha1, s);
        genfn = gen_helper_crypto_sha1h;
        break;
    case 1: /* SHA1SU1 */
        feature = dc_isar_feature(aa64_sha1, s);
        genfn = gen_helper_crypto_sha1su1;
        break;
    case 2: /* SHA256SU0 */
        feature = dc_isar_feature(aa64_sha256, s);
        genfn = gen_helper_crypto_sha256su0;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
    gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
}
static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
{
    tcg_gen_rotli_i64(d, m, 1);
    tcg_gen_xor_i64(d, d, n);
}

static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
{
    tcg_gen_rotli_vec(vece, d, m, 1);
    tcg_gen_xor_vec(vece, d, d, n);
}

void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3 op = {
        .fni8 = gen_rax1_i64,
        .fniv = gen_rax1_vec,
        .opt_opc = vecop_list,
        .fno = gen_helper_crypto_rax1,
        .vece = MO_64,
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
}
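/*
 * RAX1 computes d = n ^ rol64(m, 1) on each 64-bit lane. The GVecGen3
 * above supplies three equivalent expansions: fni8 for hosts with only
 * scalar 64-bit ops, fniv using rotli_vec where the host supports
 * vector rotates, and the out-of-line helper as a final fallback.
 */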
/* Crypto three-reg SHA512
 *  31                   21 20  16 15  14  13 12  11 10 9    5 4    0
 * +-----------------------+------+---+---+-----+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+---+---+-----+--------+------+------+
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o =  extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;
    gen_helper_gvec_3 *oolfn = NULL;
    GVecGen3Fn *gvecfn = NULL;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = dc_isar_feature(aa64_sha3, s);
            gvecfn = gen_gvec_rax1;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = dc_isar_feature(aa64_sm4, s);
            oolfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (oolfn) {
        gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
    } else {
        gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
    }
}
/* Crypto two-reg SHA512
 *  31                                     12 11  10 9    5 4    0
 * +-----------------------------------------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------------------------+--------+------+------+
 */
static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (opcode) {
    case 0: /* SHA512SU0 */
        feature = dc_isar_feature(aa64_sha512, s);
        break;
    case 1: /* SM4E */
        feature = dc_isar_feature(aa64_sm4, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0: /* SHA512SU0 */
        gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
        break;
    case 1: /* SM4E */
        gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Crypto four-register
 *  31               23 22 21 20  16 15  14  10 9    5 4    0
 * +-------------------+-----+------+---+------+------+------+
 * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
 * +-------------------+-----+------+---+------+------+------+
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = dc_isar_feature(aa64_sha3, s);
        break;
    case 2: /* SM3SS1 */
        feature = dc_isar_feature(aa64_sm3, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3 */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    } else {
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_constant_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);
    }
}
/* Crypto XAR
 *  31                   21 20  16 15    10 9    5 4    0
 * +-----------------------+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
 * +-----------------------+------+--------+------+------+
 */
static void disas_crypto_xar(DisasContext *s, uint32_t insn)
{
    int rm = extract32(insn, 16, 5);
    int imm6 = extract32(insn, 10, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (!dc_isar_feature(aa64_sha3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
                 vec_full_reg_offset(s, rn),
                 vec_full_reg_offset(s, rm), imm6, 16,
                 vec_full_reg_size(s));
}
/* Crypto three-reg imm2
 *  31                   21 20  16 15  14 13 12  11 10 9    5 4    0
 * +-----------------------+------+-----+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+-----+------+--------+------+------+
 */
static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
        gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
    };
    int opcode = extract32(insn, 10, 2);
    int imm2 = extract32(insn, 12, 2);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (!dc_isar_feature(aa64_sm3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
}
/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL }
};
static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
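    /*
     * Bit 28 set with bit 30 clear selects the scalar floating-point
     * and FP/integer conversion groups; everything else in this
     * encoding space is SIMD (or crypto) and goes via the table above.
     */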
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}

static bool trans_OK(DisasContext *s, arg_OK *a)
{
    return true;
}

static bool trans_FAIL(DisasContext *s, arg_OK *a)
{
    s->is_nonstreaming = true;
    return true;
}

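/*
 * trans_OK and trans_FAIL are the targets of the generated SME FA64
 * decoder: insns that are legal in streaming SVE mode decode to OK,
 * while FAIL flags the insn as non-streaming so that the FP/SVE access
 * checks can raise the streaming-mode trap.
 */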
/**
 * is_guarded_page:
 * @env: The cpu environment
 * @s: The DisasContext
 *
 * Return true if the page is guarded.
 */
static bool is_guarded_page(CPUARMState *env, DisasContext *s)
{
    uint64_t addr = s->base.pc_first;
#ifdef CONFIG_USER_ONLY
    return page_get_flags(addr) & PAGE_BTI;
#else
    CPUTLBEntryFull *full;
    void *host;
    int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
    int flags;

    /*
     * We test this immediately after reading an insn, which means
     * that the TLB entry must be present and valid, and thus this
     * access will never raise an exception.
     */
    flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
                              false, &host, &full, 0);
    assert(!(flags & TLB_INVALID_MASK));

    return full->guarded;
#endif
}

/**
 * btype_destination_ok:
 * @insn: The instruction at the branch destination
 * @bt: SCTLR_ELx.BT
 * @btype: PSTATE.BTYPE, and is non-zero
 *
 * On a guarded page, there are a limited number of insns
 * that may be present at the branch target:
 *  - branch target identifiers,
 *  - paciasp, pacibsp,
 *  - BRK insn,
 *  - HLT insn.
 * Anything else causes a Branch Target Exception.
 *
 * Return true if the branch is compatible, false to raise BTITRAP.
 */
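/*
 * For example, "BTI c" is HINT #34, i.e. insn == 0xd503245f: it passes
 * the (insn & 0xfffff01f) == 0xd503201f test for the HINT space below,
 * and extract32(insn, 5, 7) yields 0b100010.
 */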
static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
{
    if ((insn & 0xfffff01fu) == 0xd503201fu) {
        /* HINT space */
        switch (extract32(insn, 5, 7)) {
        case 0b011001: /* PACIASP */
        case 0b011011: /* PACIBSP */
            /*
             * If SCTLR_ELx.BT, then PACI*SP are not compatible
             * with btype == 3.  Otherwise all btype are ok.
             */
            return !bt || btype != 3;
        case 0b100000: /* BTI */
            /* Not compatible with any btype. */
            return false;
        case 0b100010: /* BTI c */
            /* Not compatible with btype == 3 */
            return btype != 3;
        case 0b100100: /* BTI j */
            /* Not compatible with btype == 2 */
            return btype != 2;
        case 0b100110: /* BTI jc */
            /* Compatible with any btype. */
            return true;
        }
    } else {
        switch (insn & 0xffe0001fu) {
        case 0xd4200000u: /* BRK */
        case 0xd4400000u: /* HLT */
            /* Give priority to the breakpoint exception. */
            return true;
        }
    }
    return false;
}

/* C3.1 A64 instruction index by encoding */
static void disas_a64_legacy(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 4)) {
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe:      /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd:      /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf:      /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

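/*
 * Bits [28:25] are op0 of the top-level encoding table.  Values not
 * listed here (0x0-0x3, 0x8, 0x9) are either claimed earlier by the
 * decodetree decoders (data processing - immediate, SVE, SME) or are
 * unallocated, so they UNDEF.
 */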
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = env_archcpu(env);
    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
    int bound, core_mmu_idx;

    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;
    dc->pc_save = dc->base.pc_first;
    dc->aarch64 = true;
    dc->thumb = false;
    dc->sctlr_b = 0;
    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
    dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
    dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
    dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
    dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
    dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
    dc->fgt_eret = EX_TBFLAG_A64(tb_flags, FGT_ERET);
    dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
    dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
    dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
    dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
    dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
    dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
    dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
    dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
    dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);

    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;
    dc->dcz_blocksize = arm_cpu->dcz_blocksize;

#ifdef CONFIG_USER_ONLY
    /* In sve_probe_page, we assume TBI is enabled. */
    tcg_debug_assert(dc->tbid & 1);
#endif

    dc->lse2 = dc_isar_feature(aa64_lse2, dc);

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
    dc->is_ldex = false;

    /* Bound the number of insns to execute to those left on the page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
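    /*
     * Worked example, assuming 4 KiB pages: if pc_first has page offset
     * 0x234, then (pc_first | TARGET_PAGE_MASK) is the signed value
     * -0xdcc, and bound = 0xdcc / 4 = 883 insns left on the page.
     */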
    /* If architectural single step active, limit to 1. */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
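    /*
     * With CF_PCREL the generated code is position independent, so the
     * TB may be reused at other virtual addresses; only the in-page
     * offset of the PC is recorded here.
     */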
    tcg_gen_insn_start(pc_arg, 0, 0);
    dc->insn_start = tcg_last_op();
}

static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint64_t pc = s->base.pc_next;
    uint32_t insn;

    /* Singlestep exceptions have the highest priority. */
    if (s->ss_active && !s->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(s->base.num_insns == 1);
        gen_swstep_exception(s, 0, 0);
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = pc + 4;
        return;
    }

    if (pc & 3) {
        /*
         * PC alignment fault. This has priority over the instruction abort
         * that we would receive from a translation fault via arm_ldl_code.
         * This should only be possible after an indirect branch, at the
         * start of the TB.
         */
        assert(s->base.num_insns == 1);
        gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
        return;
    }

    s->pc_curr = pc;
    insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
    s->insn = insn;
    s->base.pc_next = pc + 4;

    s->fp_access_checked = false;
    s->sve_access_checked = false;

    if (s->pstate_il) {
        /*
         * Illegal execution state. This has priority over BTI
         * exceptions, but comes after instruction abort exceptions.
         */
        gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
        return;
    }

    if (dc_isar_feature(aa64_bti, s)) {
        if (s->base.num_insns == 1) {
            /*
             * At the first insn of the TB, compute s->guarded_page.
             * We delayed computing this until successfully reading
             * the first insn of the TB, above. This (mostly) ensures
             * that the softmmu tlb entry has been populated, and the
             * page table GP bit is available.
             *
             * Note that we need to compute this even if btype == 0,
             * because this value is used for BR instructions later
             * where ENV is not available.
             */
            s->guarded_page = is_guarded_page(env, s);

            /* First insn can have btype set to non-zero. */
            tcg_debug_assert(s->btype >= 0);

            /*
             * Note that the Branch Target Exception has fairly high
             * priority -- below debugging exceptions but above most
             * everything else. This allows us to handle this now
             * instead of waiting until the insn is otherwise decoded.
             */
            if (s->btype != 0
                && s->guarded_page
                && !btype_destination_ok(insn, s->bt, s->btype)) {
                gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype));
                return;
            }
        } else {
            /* Not the first insn: btype must be 0. */
            tcg_debug_assert(s->btype == 0);
        }
    }

    s->is_nonstreaming = false;
    if (s->sme_trap_nonstreaming) {
        disas_sme_fa64(s, insn);
    }

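    /*
     * The decodetree-generated decoders are tried in order; only an
     * encoding that none of them claims falls through to the legacy
     * hand-written decoder.
     */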
    if (!disas_a64(s, insn) &&
        !disas_sme(s, insn) &&
        !disas_sve(s, insn)) {
        disas_a64_legacy(s, insn);
    }

    /*
     * After execution of most insns, btype is reset to 0.
     * Note that we set btype == -1 when the insn sets btype.
     */
    if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
        reset_btype(s);
    }
}

static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ss_active)) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            gen_step_complete_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, 4);
            break;
        default:
        case DISAS_UPDATE_EXIT:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_update_pc(dc, 4);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_update_pc(dc, 4);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
            /*
             * This is a special case because we don't want to just halt
             * the CPU if trying to debug across a WFI.
             */
            gen_a64_update_pc(dc, 4);
            gen_helper_wfi(cpu_env, tcg_constant_i32(4));
            /*
             * The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
    }
}

static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
    target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
}

const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};
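/*
 * These hooks are driven by the generic translator_loop(): per TB it
 * calls init_disas_context and tb_start once, then insn_start and
 * translate_insn for each guest insn, and tb_stop to finish; disas_log
 * runs only when "-d in_asm" logging is enabled.
 */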