/*
 * AArch64 translation
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"

#include "hw/semihosting/semihost.h"
#include "exec/gen-icount.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"

#include "trace-tcg.h"
#include "translate-a64.h"
#include "qemu/atomic128.h"

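/* TCG globals caching the AArch64 general-purpose registers and the PC */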
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;

/* initialize TCG globals. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}

/*
 * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
 */
static int get_a64_user_mem_index(DisasContext *s)
{
    /*
     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
     * which is the usual mmu_idx for this cpu state.
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            useridx = ARMMMUIdx_SE10_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}

static void reset_btype(DisasContext *s)
{
    if (s->btype != 0) {
        TCGv_i32 zero = tcg_const_i32(0);
        tcg_gen_st_i32(zero, cpu_env, offsetof(CPUARMState, btype));
        tcg_temp_free_i32(zero);
        s->btype = 0;
    }
}

static void set_btype(DisasContext *s, int val)
{
    TCGv_i32 tcg_val;

    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */
    tcg_debug_assert(val >= 1 && val <= 3);

    tcg_val = tcg_const_i32(val);
    tcg_gen_st_i32(tcg_val, cpu_env, offsetof(CPUARMState, btype));
    tcg_temp_free_i32(tcg_val);
    s->btype = -1;
}

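/* Set the PC from a known immediate value */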
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}

/*
 * Handle Top Byte Ignore (TBI) bits.
 *
 * If address tagging is enabled via the TCR TBI bits:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * Here we have concatenated TBI{1,0} into tbi.
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55. */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        if (tbi != 3) {
            TCGv_i64 tcg_zero = tcg_const_i64(0);

            /*
             * The two TBI bits differ.
             * If tbi0, then !tbi1: only use the extension if positive.
             * if !tbi0, then tbi1: only use the extension if negative.
             */
            tcg_gen_movcond_i64(tbi == 1 ? TCG_COND_GE : TCG_COND_LT,
                                dst, dst, tcg_zero, dst, src);
            tcg_temp_free_i64(tcg_zero);
        }
    }
}

static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI bits,
     * then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
}

/*
 * Return a "clean" address for ADDR according to TBID.
 * This is always a fresh temporary, as we need to be able to
 * increment this independently of a dirty write-back address.
 */
static TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = new_tmp_a64(s);
    /*
     * In order to get the correct value in the FAR_ELx register,
     * we must present the memory subsystem with the "dirty" address
     * including the TBI. In system mode we can make this work via
     * the TLB, dropping the TBI during translation. But for user-only
     * mode we don't have that option, and must remove the top byte now.
     */
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}

typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

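/* Compute the 64-bit (cond, value) comparison pair for ARM condition code
 * "cc"; the value must be released with a64_free_cc().
 */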
static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /* Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly. The NE/EQ comparisons are also fine with this choice. */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);

    arm_free_cc(&c32);
}

static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
{
    gen_a64_set_pc_im(pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
                               uint32_t syndrome, uint32_t target_el)
{
    gen_a64_set_pc_im(pc);
    gen_exception(excp, syndrome, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    TCGv_i32 tcg_syn;

    gen_a64_set_pc_im(s->pc_curr);
    tcg_syn = tcg_const_i32(syndrome);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep (either QEMU's or the ARM
     * debug architecture kind) or deterministic io
     */
    if (s->base.singlestep_enabled || s->ss_active ||
        (tb_cflags(s->base.tb) & CF_LAST_IO)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Only link tbs from inside the same guest page */
    if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }
#endif

    return true;
}

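/* Emit a jump to "dest": chained directly to the next TB when use_goto_tb()
 * allows it, otherwise via an indirect lookup, raising the appropriate
 * debug exception instead when single-stepping.
 */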
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->base.tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb(tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else if (s->base.singlestep_enabled) {
            gen_exception_internal(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}

void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}

/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}

/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}

/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
    return v;
}

/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop move, with side effect of clearing the tail. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}

void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}

TCGv_ptr get_fpstatus_ptr(bool is_f16)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    /* In A64 all instructions (both FP and Neon) use the FPCR; there
     * is no equivalent of the A32 Neon "standard FPSCR value".
     * However half-precision operations operate under a different
     * FZ16 flag and use vfp.fp_status_f16 instead of vfp.fp_status.
     */
    if (is_f16) {
        offset = offsetof(CPUARMState, vfp.fp_status_f16);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

/* Expand a 2-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 4-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand operation using an out-of-line helper. */
static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int data, gen_helper_gvec_2 *fn)
{
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand operation using an out-of-line helper. */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = get_fpstatus_ptr(is_fp16);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}

/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}

/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}

/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}

/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}

/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}

/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, int size, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s,
                             TCGv_i64 dest, TCGv_i64 tcg_addr,
                             int size, bool is_signed,
                             bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    MemOp memop = s->be_data + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      is_signed,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_ld(DisasContext *s,
                      TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
                     get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
                            s->be_data + size);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}

/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi = NULL;

    if (size < 4) {
        MemOp memop = s->be_data + size;
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_temp_free_i64(tmplo);

    if (tmphi) {
        tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
        tcg_temp_free_i64(tmphi);
    }
    clear_vec_high(s, tmphi != NULL, destidx);
}

/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size, MemOp endian)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);

    tcg_temp_free_i64(tcg_tmp);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size, MemOp endian)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}

/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static inline bool fp_access_check(DisasContext *s)
{
    assert(!s->fp_access_checked);
    s->fp_access_checked = true;

    if (!s->fp_excp_el) {
        return true;
    }

    gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                       syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
    return false;
}

/* Check that SVE access is enabled. If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 */
bool sve_access_check(DisasContext *s)
{
    if (s->sve_excp_el) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_sve_access_trap(),
                           s->sve_excp_el);
        return false;
    }
    return fp_access_check(s);
}

/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}

static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * This provides a simple table-based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}

/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */

/* Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                imm26                |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;

    if (insn & (1U << 31)) {
        /* BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
    }

    /* B Branch / BL Branch with link */
    reset_btype(s);
    gen_goto_tb(s, 0, addr);
}

/* Compare and branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |        imm19        |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}

/* Test and branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}

/* Conditional branch (immediate)
 *  31           25  24  23                  5  4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |        imm19        | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
    cond = extract32(insn, 0, 4);

    reset_btype(s);
    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->base.pc_next);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}

/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0b00000: /* NOP */
        break;
    case 0b00011: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        break;
    case 0b00001: /* YIELD */
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 0b00010: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 0b00100: /* SEV */
    case 0b00101: /* SEVL */
        /* we treat all as NOP at least for now */
        break;
    case 0b00111: /* XPACLRI */
        if (s->pauth_active) {
            gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
        }
        break;
    case 0b01000: /* PACIA1716 */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01010: /* PACIB1716 */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01100: /* AUTIA1716 */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01110: /* AUTIB1716 */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b11000: /* PACIAZ */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11001: /* PACIASP */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11010: /* PACIBZ */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11011: /* PACIBSP */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11100: /* AUTIAZ */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11101: /* AUTIASP */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11110: /* AUTIBZ */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11111: /* AUTIBSP */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    default:
        /* default specified as NOP equivalent */
        break;
    }
}

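/* CLREX: clear the local exclusive monitor by invalidating cpu_exclusive_addr */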
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        reset_btype(s);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    case 7: /* SB */
        if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
            goto do_unallocated;
        }
        /*
         * TODO: There is no speculation barrier opcode for TCG;
         * MB and end the TB instead.
         */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}

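/* XAFlag and AXFlag (the v8.5 flag-manipulation extension) convert
 * PSTATE.NZCV between the native Arm flag format and the alternative
 * flag format, in which only Z and C carry information (N and V are zero).
 */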
static void gen_xaflag(void)
{
    TCGv_i32 z = tcg_temp_new_i32();

    tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);

    /*
     * (!C & !Z) << 31
     * (!(C | Z)) << 31
     * ~((C | Z) << 31)
     * ~-(C | Z)
     * (C | Z) - 1
     */
    tcg_gen_or_i32(cpu_NF, cpu_CF, z);
    tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);

    /* !(Z & C) */
    tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
    tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);

    /* (!C & Z) << 31 -> -(Z & ~C) */
    tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);

    /* C | Z */
    tcg_gen_or_i32(cpu_CF, cpu_CF, z);

    tcg_temp_free_i32(z);
}

static void gen_axflag(void)
{
    tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);        /* V ? -1 : 0 */
    tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);    /* C & !V */

    /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
    tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);

    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGv_i32 t1;
    int op = op1 << 3 | op2;

    /* End the TB by default, chaining is ok. */
    s->base.is_jmp = DISAS_TOO_MANY;

    switch (op) {
    case 0x00: /* CFINV */
        if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
            goto do_unallocated;
        }
        tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x01: /* XAFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_xaflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x02: /* AXFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_axflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x03: /* UAO */
        if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_UAO);
        } else {
            clear_pstate_bits(PSTATE_UAO);
        }
        t1 = tcg_const_i32(s->current_el);
        gen_helper_rebuild_hflags_a64(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x04: /* PAN */
        if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_PAN);
        } else {
            clear_pstate_bits(PSTATE_PAN);
        }
        t1 = tcg_const_i32(s->current_el);
        gen_helper_rebuild_hflags_a64(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        t1 = tcg_const_i32(crm & PSTATE_SP);
        gen_helper_msr_i_spsel(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x1e: /* DAIFSet */
        t1 = tcg_const_i32(crm);
        gen_helper_msr_i_daifset(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x1f: /* DAIFClear */
        t1 = tcg_const_i32(crm);
        gen_helper_msr_i_daifclear(cpu_env, t1);
        tcg_temp_free_i32(t1);
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
        break;

    case 0x1c: /* TCO */
        if (dc_isar_feature(aa64_mte, s)) {
            /* Full MTE is enabled -- set the TCO bit as directed. */
            if (crm & 1) {
                set_pstate_bits(PSTATE_TCO);
            } else {
                clear_pstate_bits(PSTATE_TCO);
            }
            t1 = tcg_const_i32(s->current_el);
            gen_helper_rebuild_hflags_a64(cpu_env, t1);
            tcg_temp_free_i32(t1);
            /* Many factors, including TCO, go into MTE_ACTIVE. */
            s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
            /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */
            s->base.is_jmp = DISAS_NEXT;
        } else {
            goto do_unallocated;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}

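/* Read PSTATE.NZCV into bits [31:28] of tcg_rt (zero elsewhere), as for MRS NZCV */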
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}

static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}

/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        TCGv_ptr tmpptr;
        TCGv_i32 tcg_syn, tcg_isread;
        uint32_t syndrome;

        gen_a64_set_pc_im(s->pc_curr);
        tmpptr = tcg_const_ptr(ri);
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        tcg_syn = tcg_const_i32(syndrome);
        tcg_isread = tcg_const_i32(isread);
        gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
        tcg_temp_free_ptr(tmpptr);
        tcg_temp_free_i32(tcg_syn);
        tcg_temp_free_i32(tcg_isread);
    } else if (ri->type & ARM_CP_RAISES_EXC) {
        /*
         * The readfn or writefn might raise an exception;
         * synchronize the CPU state in case it does.
         */
        gen_a64_set_pc_im(s->pc_curr);
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    default:
        break;
    }
    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
        return;
    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
    if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /*
         * A write to any coprocessor register that ends a TB
         * must rebuild the hflags for the next TB.
         */
        TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
        gen_helper_rebuild_hflags_a64(cpu_env, tcg_el);
        tcg_temp_free_i32(tcg_el);
        /*
         * We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
}

/* System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}
1878
1879 /* Exception generation
1880 *
1881 * 31 24 23 21 20 5 4 2 1 0
1882 * +-----------------+-----+------------------------+-----+----+
1883 * | 1 1 0 1 0 1 0 0 | opc | imm16 | op2 | LL |
1884 * +-----------------------+------------------------+----------+
1885 */
1886 static void disas_exc(DisasContext *s, uint32_t insn)
1887 {
1888 int opc = extract32(insn, 21, 3);
1889 int op2_ll = extract32(insn, 0, 5);
1890 int imm16 = extract32(insn, 5, 16);
1891 TCGv_i32 tmp;
1892
1893 switch (opc) {
1894 case 0:
1895 /* For SVC, HVC and SMC we advance the single-step state
1896 * machine before taking the exception. This is architecturally
1897 * mandated, to ensure that single-stepping a system call
1898 * instruction works properly.
1899 */
1900 switch (op2_ll) {
1901 case 1: /* SVC */
1902 gen_ss_advance(s);
1903 gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
1904 syn_aa64_svc(imm16), default_exception_el(s));
1905 break;
1906 case 2: /* HVC */
1907 if (s->current_el == 0) {
1908 unallocated_encoding(s);
1909 break;
1910 }
1911 /* The pre HVC helper handles cases when HVC gets trapped
1912 * as an undefined insn by runtime configuration.
1913 */
1914 gen_a64_set_pc_im(s->pc_curr);
1915 gen_helper_pre_hvc(cpu_env);
1916 gen_ss_advance(s);
1917 gen_exception_insn(s, s->base.pc_next, EXCP_HVC,
1918 syn_aa64_hvc(imm16), 2);
1919 break;
1920 case 3: /* SMC */
1921 if (s->current_el == 0) {
1922 unallocated_encoding(s);
1923 break;
1924 }
1925 gen_a64_set_pc_im(s->pc_curr);
1926 tmp = tcg_const_i32(syn_aa64_smc(imm16));
1927 gen_helper_pre_smc(cpu_env, tmp);
1928 tcg_temp_free_i32(tmp);
1929 gen_ss_advance(s);
1930 gen_exception_insn(s, s->base.pc_next, EXCP_SMC,
1931 syn_aa64_smc(imm16), 3);
1932 break;
1933 default:
1934 unallocated_encoding(s);
1935 break;
1936 }
1937 break;
1938 case 1:
1939 if (op2_ll != 0) {
1940 unallocated_encoding(s);
1941 break;
1942 }
1943 /* BRK */
1944 gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
1945 break;
1946 case 2:
1947 if (op2_ll != 0) {
1948 unallocated_encoding(s);
1949 break;
1950 }
1951 /* HLT. This has two purposes.
1952 * Firstly, it is architecturally an external halting debug instruction.
1953 * Since QEMU does not implement external debug, we treat it as
1954 * required when halting debug is disabled: it will UNDEF.
1955 * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
1956 */
1957 if (semihosting_enabled() && imm16 == 0xf000) {
1958 #ifndef CONFIG_USER_ONLY
1959 /* In system mode, don't allow userspace access to semihosting,
1960 * to provide some semblance of security (and for consistency
1961 * with our 32-bit semihosting).
1962 */
1963 if (s->current_el == 0) {
1964 unsupported_encoding(s, insn);
1965 break;
1966 }
1967 #endif
1968 gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
1969 } else {
1970 unsupported_encoding(s, insn);
1971 }
1972 break;
1973 case 5:
1974 if (op2_ll < 1 || op2_ll > 3) {
1975 unallocated_encoding(s);
1976 break;
1977 }
1978 /* DCPS1, DCPS2, DCPS3 */
1979 unsupported_encoding(s, insn);
1980 break;
1981 default:
1982 unallocated_encoding(s);
1983 break;
1984 }
1985 }
1986
1987 /* Unconditional branch (register)
1988 * 31 25 24 21 20 16 15 10 9 5 4 0
1989 * +---------------+-------+-------+-------+------+-------+
1990 * | 1 1 0 1 0 1 1 | opc | op2 | op3 | Rn | op4 |
1991 * +---------------+-------+-------+-------+------+-------+
1992 */
1993 static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
1994 {
1995 unsigned int opc, op2, op3, rn, op4;
1996 unsigned btype_mod = 2; /* 0: BR, 1: BLR, 2: other */
1997 TCGv_i64 dst;
1998 TCGv_i64 modifier;
1999
2000 opc = extract32(insn, 21, 4);
2001 op2 = extract32(insn, 16, 5);
2002 op3 = extract32(insn, 10, 6);
2003 rn = extract32(insn, 5, 5);
2004 op4 = extract32(insn, 0, 5);
2005
2006 if (op2 != 0x1f) {
2007 goto do_unallocated;
2008 }
2009
2010 switch (opc) {
2011 case 0: /* BR */
2012 case 1: /* BLR */
2013 case 2: /* RET */
2014 btype_mod = opc;
2015 switch (op3) {
2016 case 0:
2017 /* BR, BLR, RET */
2018 if (op4 != 0) {
2019 goto do_unallocated;
2020 }
2021 dst = cpu_reg(s, rn);
2022 break;
2023
2024 case 2:
2025 case 3:
2026 if (!dc_isar_feature(aa64_pauth, s)) {
2027 goto do_unallocated;
2028 }
2029 if (opc == 2) {
2030 /* RETAA, RETAB */
2031 if (rn != 0x1f || op4 != 0x1f) {
2032 goto do_unallocated;
2033 }
2034 rn = 30;
2035 modifier = cpu_X[31];
2036 } else {
2037 /* BRAAZ, BRABZ, BLRAAZ, BLRABZ */
2038 if (op4 != 0x1f) {
2039 goto do_unallocated;
2040 }
2041 modifier = new_tmp_a64_zero(s);
2042 }
2043 if (s->pauth_active) {
2044 dst = new_tmp_a64(s);
2045 if (op3 == 2) {
2046 gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
2047 } else {
2048 gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
2049 }
2050 } else {
2051 dst = cpu_reg(s, rn);
2052 }
2053 break;
2054
2055 default:
2056 goto do_unallocated;
2057 }
2058 gen_a64_set_pc(s, dst);
2059 /* BLR also needs to load return address */
2060 if (opc == 1) {
2061 tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
2062 }
2063 break;
2064
2065 case 8: /* BRAA */
2066 case 9: /* BLRAA */
2067 if (!dc_isar_feature(aa64_pauth, s)) {
2068 goto do_unallocated;
2069 }
2070 if ((op3 & ~1) != 2) {
2071 goto do_unallocated;
2072 }
2073 btype_mod = opc & 1;
2074 if (s->pauth_active) {
2075 dst = new_tmp_a64(s);
2076 modifier = cpu_reg_sp(s, op4);
2077 if (op3 == 2) {
2078 gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
2079 } else {
2080 gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
2081 }
2082 } else {
2083 dst = cpu_reg(s, rn);
2084 }
2085 gen_a64_set_pc(s, dst);
2086 /* BLRAA also needs to load return address */
2087 if (opc == 9) {
2088 tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
2089 }
2090 break;
2091
2092 case 4: /* ERET */
2093 if (s->current_el == 0) {
2094 goto do_unallocated;
2095 }
2096 switch (op3) {
2097 case 0: /* ERET */
2098 if (op4 != 0) {
2099 goto do_unallocated;
2100 }
2101 dst = tcg_temp_new_i64();
2102 tcg_gen_ld_i64(dst, cpu_env,
2103 offsetof(CPUARMState, elr_el[s->current_el]));
2104 break;
2105
2106 case 2: /* ERETAA */
2107 case 3: /* ERETAB */
2108 if (!dc_isar_feature(aa64_pauth, s)) {
2109 goto do_unallocated;
2110 }
2111 if (rn != 0x1f || op4 != 0x1f) {
2112 goto do_unallocated;
2113 }
2114 dst = tcg_temp_new_i64();
2115 tcg_gen_ld_i64(dst, cpu_env,
2116 offsetof(CPUARMState, elr_el[s->current_el]));
2117 if (s->pauth_active) {
2118 modifier = cpu_X[31];
2119 if (op3 == 2) {
2120 gen_helper_autia(dst, cpu_env, dst, modifier);
2121 } else {
2122 gen_helper_autib(dst, cpu_env, dst, modifier);
2123 }
2124 }
2125 break;
2126
2127 default:
2128 goto do_unallocated;
2129 }
2130 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
2131 gen_io_start();
2132 }
2133
2134 gen_helper_exception_return(cpu_env, dst);
2135 tcg_temp_free_i64(dst);
2136 /* Must exit loop to check unmasked IRQs */
2137 s->base.is_jmp = DISAS_EXIT;
2138 return;
2139
2140 case 5: /* DRPS */
2141 if (op3 != 0 || op4 != 0 || rn != 0x1f) {
2142 goto do_unallocated;
2143 } else {
2144 unsupported_encoding(s, insn);
2145 }
2146 return;
2147
2148 default:
2149 do_unallocated:
2150 unallocated_encoding(s);
2151 return;
2152 }
2153
2154 switch (btype_mod) {
2155 case 0: /* BR */
2156 if (dc_isar_feature(aa64_bti, s)) {
2157 /* BR to {x16,x17} or !guard -> 1, else 3. */
2158 set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
2159 }
2160 break;
2161
2162 case 1: /* BLR */
2163 if (dc_isar_feature(aa64_bti, s)) {
2164 /* BLR sets BTYPE to 2, regardless of source guarded page. */
2165 set_btype(s, 2);
2166 }
2167 break;
2168
2169 default: /* RET or none of the above. */
2170 /* BTYPE will be set to 0 by normal end-of-insn processing. */
2171 break;
2172 }
2173
2174 s->base.is_jmp = DISAS_JUMP;
2175 }
2176
2177 /* Branches, exception generating and system instructions */
2178 static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
2179 {
2180 switch (extract32(insn, 25, 7)) {
2181 case 0x0a: case 0x0b:
2182 case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
2183 disas_uncond_b_imm(s, insn);
2184 break;
2185 case 0x1a: case 0x5a: /* Compare & branch (immediate) */
2186 disas_comp_b_imm(s, insn);
2187 break;
2188 case 0x1b: case 0x5b: /* Test & branch (immediate) */
2189 disas_test_b_imm(s, insn);
2190 break;
2191 case 0x2a: /* Conditional branch (immediate) */
2192 disas_cond_b_imm(s, insn);
2193 break;
2194 case 0x6a: /* Exception generation / System */
2195 if (insn & (1 << 24)) {
2196 if (extract32(insn, 22, 2) == 0) {
2197 disas_system(s, insn);
2198 } else {
2199 unallocated_encoding(s);
2200 }
2201 } else {
2202 disas_exc(s, insn);
2203 }
2204 break;
2205 case 0x6b: /* Unconditional branch (register) */
2206 disas_uncond_b_reg(s, insn);
2207 break;
2208 default:
2209 unallocated_encoding(s);
2210 break;
2211 }
2212 }
2213
2214 /*
2215 * Load/Store exclusive instructions are implemented by remembering
2216 * the value and address loaded, and checking that both are still
2217 * the same when the store is performed. This is not the architecturally
2218 * mandated semantics, but it works for typical guest code sequences
2219 * and avoids having to monitor regular stores.
2220 *
2221 * The store exclusive uses the atomic cmpxchg primitives to avoid
2222 * races in multi-threaded linux-user and when MTTCG softmmu is
2223 * enabled.
2224 */
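/*
 * For example (an illustrative guest sequence, not taken from the
 * ARM ARM), an atomic increment loop:
 *
 *     retry:  ldaxr   x0, [x1]        ; exclusive_addr = x1,
 *                                     ;   exclusive_val = [x1]
 *             add     x0, x0, #1
 *             stlxr   w2, x0, [x1]    ; cmpxchg against exclusive_val;
 *                                     ;   w2 = 0 on success, 1 on failure
 *             cbnz    w2, retry
 *
 * The store succeeds iff [x1] still holds the value that was loaded,
 * which is weaker than a true exclusive monitor but sufficient for
 * typical guest code.
 */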
2225 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
2226 TCGv_i64 addr, int size, bool is_pair)
2227 {
2228 int idx = get_mem_index(s);
2229 MemOp memop = s->be_data;
2230
2231 g_assert(size <= 3);
2232 if (is_pair) {
2233 g_assert(size >= 2);
2234 if (size == 2) {
2235 /* The pair must be single-copy atomic for the doubleword. */
2236 memop |= MO_64 | MO_ALIGN;
2237 tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
2238 if (s->be_data == MO_LE) {
2239 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
2240 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
2241 } else {
2242 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
2243 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
2244 }
2245 } else {
2246 /* The pair must be single-copy atomic for *each* doubleword, not
2247 * the entire quadword; however, it must be quadword aligned. */
2248 memop |= MO_64;
2249 tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
2250 memop | MO_ALIGN_16);
2251
2252 TCGv_i64 addr2 = tcg_temp_new_i64();
2253 tcg_gen_addi_i64(addr2, addr, 8);
2254 tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
2255 tcg_temp_free_i64(addr2);
2256
2257 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2258 tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
2259 }
2260 } else {
2261 memop |= size | MO_ALIGN;
2262 tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
2263 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2264 }
2265 tcg_gen_mov_i64(cpu_exclusive_addr, addr);
2266 }
2267
2268 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
2269 TCGv_i64 addr, int size, int is_pair)
2270 {
2271 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
2272 * && (!is_pair || env->exclusive_high == [addr + datasize])) {
2273 * [addr] = {Rt};
2274 * if (is_pair) {
2275 * [addr + datasize] = {Rt2};
2276 * }
2277 * {Rd} = 0;
2278 * } else {
2279 * {Rd} = 1;
2280 * }
2281 * env->exclusive_addr = -1;
2282 */
2283 TCGLabel *fail_label = gen_new_label();
2284 TCGLabel *done_label = gen_new_label();
2285 TCGv_i64 tmp;
2286
2287 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
2288
2289 tmp = tcg_temp_new_i64();
2290 if (is_pair) {
2291 if (size == 2) {
2292 if (s->be_data == MO_LE) {
2293 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
2294 } else {
2295 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
2296 }
2297 tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
2298 cpu_exclusive_val, tmp,
2299 get_mem_index(s),
2300 MO_64 | MO_ALIGN | s->be_data);
2301 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2302 } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2303 if (!HAVE_CMPXCHG128) {
2304 gen_helper_exit_atomic(cpu_env);
2305 s->base.is_jmp = DISAS_NORETURN;
2306 } else if (s->be_data == MO_LE) {
2307 gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
2308 cpu_exclusive_addr,
2309 cpu_reg(s, rt),
2310 cpu_reg(s, rt2));
2311 } else {
2312 gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
2313 cpu_exclusive_addr,
2314 cpu_reg(s, rt),
2315 cpu_reg(s, rt2));
2316 }
2317 } else if (s->be_data == MO_LE) {
2318 gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
2319 cpu_reg(s, rt), cpu_reg(s, rt2));
2320 } else {
2321 gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
2322 cpu_reg(s, rt), cpu_reg(s, rt2));
2323 }
2324 } else {
2325 tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
2326 cpu_reg(s, rt), get_mem_index(s),
2327 size | MO_ALIGN | s->be_data);
2328 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2329 }
2330 tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
2331 tcg_temp_free_i64(tmp);
2332 tcg_gen_br(done_label);
2333
2334 gen_set_label(fail_label);
2335 tcg_gen_movi_i64(cpu_reg(s, rd), 1);
2336 gen_set_label(done_label);
2337 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
2338 }
2339
2340 static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
2341 int rn, int size)
2342 {
2343 TCGv_i64 tcg_rs = cpu_reg(s, rs);
2344 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2345 int memidx = get_mem_index(s);
2346 TCGv_i64 clean_addr;
2347
2348 if (rn == 31) {
2349 gen_check_sp_alignment(s);
2350 }
2351 clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
2352 tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
2353 size | MO_ALIGN | s->be_data);
2354 }
2355
2356 static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
2357 int rn, int size)
2358 {
2359 TCGv_i64 s1 = cpu_reg(s, rs);
2360 TCGv_i64 s2 = cpu_reg(s, rs + 1);
2361 TCGv_i64 t1 = cpu_reg(s, rt);
2362 TCGv_i64 t2 = cpu_reg(s, rt + 1);
2363 TCGv_i64 clean_addr;
2364 int memidx = get_mem_index(s);
2365
2366 if (rn == 31) {
2367 gen_check_sp_alignment(s);
2368 }
2369 clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
2370
2371 if (size == 2) {
2372 TCGv_i64 cmp = tcg_temp_new_i64();
2373 TCGv_i64 val = tcg_temp_new_i64();
2374
2375 if (s->be_data == MO_LE) {
2376 tcg_gen_concat32_i64(val, t1, t2);
2377 tcg_gen_concat32_i64(cmp, s1, s2);
2378 } else {
2379 tcg_gen_concat32_i64(val, t2, t1);
2380 tcg_gen_concat32_i64(cmp, s2, s1);
2381 }
2382
2383 tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
2384 MO_64 | MO_ALIGN | s->be_data);
2385 tcg_temp_free_i64(val);
2386
2387 if (s->be_data == MO_LE) {
2388 tcg_gen_extr32_i64(s1, s2, cmp);
2389 } else {
2390 tcg_gen_extr32_i64(s2, s1, cmp);
2391 }
2392 tcg_temp_free_i64(cmp);
2393 } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2394 if (HAVE_CMPXCHG128) {
2395 TCGv_i32 tcg_rs = tcg_const_i32(rs);
2396 if (s->be_data == MO_LE) {
2397 gen_helper_casp_le_parallel(cpu_env, tcg_rs,
2398 clean_addr, t1, t2);
2399 } else {
2400 gen_helper_casp_be_parallel(cpu_env, tcg_rs,
2401 clean_addr, t1, t2);
2402 }
2403 tcg_temp_free_i32(tcg_rs);
2404 } else {
2405 gen_helper_exit_atomic(cpu_env);
2406 s->base.is_jmp = DISAS_NORETURN;
2407 }
2408 } else {
2409 TCGv_i64 d1 = tcg_temp_new_i64();
2410 TCGv_i64 d2 = tcg_temp_new_i64();
2411 TCGv_i64 a2 = tcg_temp_new_i64();
2412 TCGv_i64 c1 = tcg_temp_new_i64();
2413 TCGv_i64 c2 = tcg_temp_new_i64();
2414 TCGv_i64 zero = tcg_const_i64(0);
2415
2416 /* Load the two words, in memory order. */
2417 tcg_gen_qemu_ld_i64(d1, clean_addr, memidx,
2418 MO_64 | MO_ALIGN_16 | s->be_data);
2419 tcg_gen_addi_i64(a2, clean_addr, 8);
2420 tcg_gen_qemu_ld_i64(d2, a2, memidx, MO_64 | s->be_data);
2421
2422 /* Compare the two words, also in memory order. */
2423 tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
2424 tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
2425 tcg_gen_and_i64(c2, c2, c1);
2426
2427 /* If compare equal, write back new data, else write back old data. */
2428 tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
2429 tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
2430 tcg_gen_qemu_st_i64(c1, clean_addr, memidx, MO_64 | s->be_data);
2431 tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
2432 tcg_temp_free_i64(a2);
2433 tcg_temp_free_i64(c1);
2434 tcg_temp_free_i64(c2);
2435 tcg_temp_free_i64(zero);
2436
2437 /* Write back the data from memory to Rs. */
2438 tcg_gen_mov_i64(s1, d1);
2439 tcg_gen_mov_i64(s2, d2);
2440 tcg_temp_free_i64(d1);
2441 tcg_temp_free_i64(d2);
2442 }
2443 }
2444
2445 /* Compute the "Sixty-Four" (SF) register-size flag. This logic is derived
2446 * from the ARMv8 specs for LDR (Shared decode for all encodings).
2447 */
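/*
 * For example (illustrative): LDRSW (signed, opc = 0b10) always targets
 * a 64-bit register, so SF = 1, while LDRH (unsigned, size = 1) targets
 * a 32-bit register, so SF = 0.
 */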
2448 static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
2449 {
2450 int opc0 = extract32(opc, 0, 1);
2451 int regsize;
2452
2453 if (is_signed) {
2454 regsize = opc0 ? 32 : 64;
2455 } else {
2456 regsize = size == 3 ? 64 : 32;
2457 }
2458 return regsize == 64;
2459 }
2460
2461 /* Load/store exclusive
2462 *
2463 * 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
2464 * +-----+-------------+----+---+----+------+----+-------+------+------+
2465 * | sz | 0 0 1 0 0 0 | o2 | L | o1 | Rs | o0 | Rt2 | Rn | Rt |
2466 * +-----+-------------+----+---+----+------+----+-------+------+------+
2467 *
2468 * sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2469 * L: 0 -> store, 1 -> load
2470 * o2: 0 -> exclusive, 1 -> not
2471 * o1: 0 -> single register, 1 -> register pair
2472 * o0: 1 -> load-acquire/store-release, 0 -> not
2473 */
2474 static void disas_ldst_excl(DisasContext *s, uint32_t insn)
2475 {
2476 int rt = extract32(insn, 0, 5);
2477 int rn = extract32(insn, 5, 5);
2478 int rt2 = extract32(insn, 10, 5);
2479 int rs = extract32(insn, 16, 5);
2480 int is_lasr = extract32(insn, 15, 1);
2481 int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
2482 int size = extract32(insn, 30, 2);
2483 TCGv_i64 clean_addr;
2484
2485 switch (o2_L_o1_o0) {
2486 case 0x0: /* STXR */
2487 case 0x1: /* STLXR */
2488 if (rn == 31) {
2489 gen_check_sp_alignment(s);
2490 }
2491 if (is_lasr) {
2492 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2493 }
2494 clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
2495 gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
2496 return;
2497
2498 case 0x4: /* LDXR */
2499 case 0x5: /* LDAXR */
2500 if (rn == 31) {
2501 gen_check_sp_alignment(s);
2502 }
2503 clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
2504 s->is_ldex = true;
2505 gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
2506 if (is_lasr) {
2507 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2508 }
2509 return;
2510
2511 case 0x8: /* STLLR */
2512 if (!dc_isar_feature(aa64_lor, s)) {
2513 break;
2514 }
2515 /* StoreLORelease is the same as Store-Release for QEMU. */
2516 /* fall through */
2517 case 0x9: /* STLR */
2518 /* Generate ISS for non-exclusive accesses including LASR. */
2519 if (rn == 31) {
2520 gen_check_sp_alignment(s);
2521 }
2522 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2523 clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
2524 do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
2525 disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2526 return;
2527
2528 case 0xc: /* LDLAR */
2529 if (!dc_isar_feature(aa64_lor, s)) {
2530 break;
2531 }
2532 /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
2533 /* fall through */
2534 case 0xd: /* LDAR */
2535 /* Generate ISS for non-exclusive accesses including LASR. */
2536 if (rn == 31) {
2537 gen_check_sp_alignment(s);
2538 }
2539 clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
2540 do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt,
2541 disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2542 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2543 return;
2544
2545 case 0x2: case 0x3: /* CASP / STXP */
2546 if (size & 2) { /* STXP / STLXP */
2547 if (rn == 31) {
2548 gen_check_sp_alignment(s);
2549 }
2550 if (is_lasr) {
2551 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2552 }
2553 clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
2554 gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
2555 return;
2556 }
2557 if (rt2 == 31
2558 && ((rt | rs) & 1) == 0
2559 && dc_isar_feature(aa64_atomics, s)) {
2560 /* CASP / CASPL */
2561 gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2562 return;
2563 }
2564 break;
2565
2566 case 0x6: case 0x7: /* CASPA / LDXP */
2567 if (size & 2) { /* LDXP / LDAXP */
2568 if (rn == 31) {
2569 gen_check_sp_alignment(s);
2570 }
2571 clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
2572 s->is_ldex = true;
2573 gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
2574 if (is_lasr) {
2575 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2576 }
2577 return;
2578 }
2579 if (rt2 == 31
2580 && ((rt | rs) & 1) == 0
2581 && dc_isar_feature(aa64_atomics, s)) {
2582 /* CASPA / CASPAL */
2583 gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2584 return;
2585 }
2586 break;
2587
2588 case 0xa: /* CAS */
2589 case 0xb: /* CASL */
2590 case 0xe: /* CASA */
2591 case 0xf: /* CASAL */
2592 if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
2593 gen_compare_and_swap(s, rs, rt, rn, size);
2594 return;
2595 }
2596 break;
2597 }
2598 unallocated_encoding(s);
2599 }
2600
2601 /*
2602 * Load register (literal)
2603 *
2604 * 31 30 29 27 26 25 24 23 5 4 0
2605 * +-----+-------+---+-----+-------------------+-------+
2606 * | opc | 0 1 1 | V | 0 0 | imm19 | Rt |
2607 * +-----+-------+---+-----+-------------------+-------+
2608 *
2609 * V: 1 -> vector (simd/fp)
2610 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
2611 * 10 -> 32 bit signed, 11 -> prefetch
2612 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
2613 */
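/*
 * For example (illustrative): with imm19 = 2 the byte offset is 8, so
 * LDR X0, <label> loads the doubleword at pc_curr + 8; the address is
 * relative to this instruction, not to the next one.
 */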
2614 static void disas_ld_lit(DisasContext *s, uint32_t insn)
2615 {
2616 int rt = extract32(insn, 0, 5);
2617 int64_t imm = sextract32(insn, 5, 19) << 2;
2618 bool is_vector = extract32(insn, 26, 1);
2619 int opc = extract32(insn, 30, 2);
2620 bool is_signed = false;
2621 int size = 2;
2622 TCGv_i64 tcg_rt, clean_addr;
2623
2624 if (is_vector) {
2625 if (opc == 3) {
2626 unallocated_encoding(s);
2627 return;
2628 }
2629 size = 2 + opc;
2630 if (!fp_access_check(s)) {
2631 return;
2632 }
2633 } else {
2634 if (opc == 3) {
2635 /* PRFM (literal) : prefetch */
2636 return;
2637 }
2638 size = 2 + extract32(opc, 0, 1);
2639 is_signed = extract32(opc, 1, 1);
2640 }
2641
2642 tcg_rt = cpu_reg(s, rt);
2643
2644 clean_addr = tcg_const_i64(s->pc_curr + imm);
2645 if (is_vector) {
2646 do_fp_ld(s, rt, clean_addr, size);
2647 } else {
2648 /* Only unsigned 32-bit loads target 32-bit registers. */
2649 bool iss_sf = opc != 0;
2650
2651 do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, false,
2652 true, rt, iss_sf, false);
2653 }
2654 tcg_temp_free_i64(clean_addr);
2655 }
2656
2657 /*
2658 * LDNP (Load Pair - non-temporal hint)
2659 * LDP (Load Pair - non vector)
2660 * LDPSW (Load Pair Signed Word - non vector)
2661 * STNP (Store Pair - non-temporal hint)
2662 * STP (Store Pair - non vector)
2663 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
2664 * LDP (Load Pair of SIMD&FP)
2665 * STNP (Store Pair of SIMD&FP - non-temporal hint)
2666 * STP (Store Pair of SIMD&FP)
2667 *
2668 * 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0
2669 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2670 * | opc | 1 0 1 | V | 0 | index | L | imm7 | Rt2 | Rn | Rt |
2671 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2672 *
2673 * opc: LDP/STP/LDNP/STNP 00 -> 32 bit, 10 -> 64 bit
2674 * LDPSW 01
2675 * LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
2676 * V: 0 -> GPR, 1 -> Vector
2677 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
2678 * 10 -> signed offset, 11 -> pre-index
2679 * L: 0 -> Store 1 -> Load
2680 *
2681 * Rt, Rt2 = GPR or SIMD registers to be transferred
2682 * Rn = general purpose register containing address
2683 * imm7 = signed offset (multiple of 4 or 8 depending on size)
2684 */
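/*
 * For example (illustrative): STP X0, X1, [SP, #-16]! is the pre-index
 * form (index = 11) with opc = 10 (64 bit) and imm7 = -2, which is
 * scaled by the 8-byte size to a byte offset of -16.
 */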
2685 static void disas_ldst_pair(DisasContext *s, uint32_t insn)
2686 {
2687 int rt = extract32(insn, 0, 5);
2688 int rn = extract32(insn, 5, 5);
2689 int rt2 = extract32(insn, 10, 5);
2690 uint64_t offset = sextract64(insn, 15, 7);
2691 int index = extract32(insn, 23, 2);
2692 bool is_vector = extract32(insn, 26, 1);
2693 bool is_load = extract32(insn, 22, 1);
2694 int opc = extract32(insn, 30, 2);
2695
2696 bool is_signed = false;
2697 bool postindex = false;
2698 bool wback = false;
2699
2700 TCGv_i64 clean_addr, dirty_addr;
2701
2702 int size;
2703
2704 if (opc == 3) {
2705 unallocated_encoding(s);
2706 return;
2707 }
2708
2709 if (is_vector) {
2710 size = 2 + opc;
2711 } else {
2712 size = 2 + extract32(opc, 1, 1);
2713 is_signed = extract32(opc, 0, 1);
2714 if (!is_load && is_signed) {
2715 unallocated_encoding(s);
2716 return;
2717 }
2718 }
2719
2720 switch (index) {
2721 case 1: /* post-index */
2722 postindex = true;
2723 wback = true;
2724 break;
2725 case 0:
2726 /* Signed offset with "non-temporal" hint. Since we don't emulate
2727 * caches, we don't care about hints to the cache system about
2728 * data access patterns, so we handle this identically to a plain
2729 * signed offset.
2730 */
2731 if (is_signed) {
2732 /* There is no non-temporal-hint version of LDPSW */
2733 unallocated_encoding(s);
2734 return;
2735 }
2736 postindex = false;
2737 break;
2738 case 2: /* signed offset, rn not updated */
2739 postindex = false;
2740 break;
2741 case 3: /* pre-index */
2742 postindex = false;
2743 wback = true;
2744 break;
2745 }
2746
2747 if (is_vector && !fp_access_check(s)) {
2748 return;
2749 }
2750
2751 offset <<= size;
2752
2753 if (rn == 31) {
2754 gen_check_sp_alignment(s);
2755 }
2756
2757 dirty_addr = read_cpu_reg_sp(s, rn, 1);
2758 if (!postindex) {
2759 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
2760 }
2761 clean_addr = clean_data_tbi(s, dirty_addr);
2762
2763 if (is_vector) {
2764 if (is_load) {
2765 do_fp_ld(s, rt, clean_addr, size);
2766 } else {
2767 do_fp_st(s, rt, clean_addr, size);
2768 }
2769 tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
2770 if (is_load) {
2771 do_fp_ld(s, rt2, clean_addr, size);
2772 } else {
2773 do_fp_st(s, rt2, clean_addr, size);
2774 }
2775 } else {
2776 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2777 TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
2778
2779 if (is_load) {
2780 TCGv_i64 tmp = tcg_temp_new_i64();
2781
2782 /* Do not modify tcg_rt before recognizing any exception
2783 * from the second load.
2784 */
2785 do_gpr_ld(s, tmp, clean_addr, size, is_signed, false,
2786 false, 0, false, false);
2787 tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
2788 do_gpr_ld(s, tcg_rt2, clean_addr, size, is_signed, false,
2789 false, 0, false, false);
2790
2791 tcg_gen_mov_i64(tcg_rt, tmp);
2792 tcg_temp_free_i64(tmp);
2793 } else {
2794 do_gpr_st(s, tcg_rt, clean_addr, size,
2795 false, 0, false, false);
2796 tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
2797 do_gpr_st(s, tcg_rt2, clean_addr, size,
2798 false, 0, false, false);
2799 }
2800 }
2801
2802 if (wback) {
2803 if (postindex) {
2804 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
2805 }
2806 tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
2807 }
2808 }
2809
2810 /*
2811 * Load/store (immediate post-indexed)
2812 * Load/store (immediate pre-indexed)
2813 * Load/store (unscaled immediate)
2814 *
2815 * 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0
2816 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2817 * |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt |
2818 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2819 *
2820 * idx = 01 -> post-indexed, 11 -> pre-indexed, 00 -> unscaled imm. (no writeback)
2821 * 10 -> unprivileged
2822 * V = 0 -> non-vector
2823 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
2824 * opc: 00 -> store, 01 -> load (unsigned), 10 -> load signed 64-bit, 11 -> load signed 32-bit
2825 */
2826 static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
2827 int opc,
2828 int size,
2829 int rt,
2830 bool is_vector)
2831 {
2832 int rn = extract32(insn, 5, 5);
2833 int imm9 = sextract32(insn, 12, 9);
2834 int idx = extract32(insn, 10, 2);
2835 bool is_signed = false;
2836 bool is_store = false;
2837 bool is_extended = false;
2838 bool is_unpriv = (idx == 2);
2839 bool iss_valid = !is_vector;
2840 bool post_index;
2841 bool writeback;
2842
2843 TCGv_i64 clean_addr, dirty_addr;
2844
2845 if (is_vector) {
2846 size |= (opc & 2) << 1;
2847 if (size > 4 || is_unpriv) {
2848 unallocated_encoding(s);
2849 return;
2850 }
2851 is_store = ((opc & 1) == 0);
2852 if (!fp_access_check(s)) {
2853 return;
2854 }
2855 } else {
2856 if (size == 3 && opc == 2) {
2857 /* PRFM - prefetch */
2858 if (idx != 0) {
2859 unallocated_encoding(s);
2860 return;
2861 }
2862 return;
2863 }
2864 if (opc == 3 && size > 1) {
2865 unallocated_encoding(s);
2866 return;
2867 }
2868 is_store = (opc == 0);
2869 is_signed = extract32(opc, 1, 1);
2870 is_extended = (size < 3) && extract32(opc, 0, 1);
2871 }
2872
2873 switch (idx) {
2874 case 0:
2875 case 2:
2876 post_index = false;
2877 writeback = false;
2878 break;
2879 case 1:
2880 post_index = true;
2881 writeback = true;
2882 break;
2883 case 3:
2884 post_index = false;
2885 writeback = true;
2886 break;
2887 default:
2888 g_assert_not_reached();
2889 }
2890
2891 if (rn == 31) {
2892 gen_check_sp_alignment(s);
2893 }
2894
2895 dirty_addr = read_cpu_reg_sp(s, rn, 1);
2896 if (!post_index) {
2897 tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
2898 }
2899 clean_addr = clean_data_tbi(s, dirty_addr);
2900
2901 if (is_vector) {
2902 if (is_store) {
2903 do_fp_st(s, rt, clean_addr, size);
2904 } else {
2905 do_fp_ld(s, rt, clean_addr, size);
2906 }
2907 } else {
2908 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2909 int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
2910 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2911
2912 if (is_store) {
2913 do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
2914 iss_valid, rt, iss_sf, false);
2915 } else {
2916 do_gpr_ld_memidx(s, tcg_rt, clean_addr, size,
2917 is_signed, is_extended, memidx,
2918 iss_valid, rt, iss_sf, false);
2919 }
2920 }
2921
2922 if (writeback) {
2923 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
2924 if (post_index) {
2925 tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
2926 }
2927 tcg_gen_mov_i64(tcg_rn, dirty_addr);
2928 }
2929 }
2930
2931 /*
2932 * Load/store (register offset)
2933 *
2934 * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
2935 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2936 * |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt |
2937 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2938 *
2939 * For non-vector:
2940 * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
2941 * opc: 00 -> store, 01 -> load (unsigned), 10 -> load signed 64-bit, 11 -> load signed 32-bit
2942 * For vector:
2943 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
2944 * opc<0>: 0 -> store, 1 -> load
2945 * V: 1 -> vector/simd
2946 * opt: extend encoding (see DecodeRegExtend)
2947 * S: if S=1 then scale the offset by the transfer size (i.e. LSL #size)
2948 * Rt: register to transfer into/out of
2949 * Rn: address register or SP for base
2950 * Rm: offset register or ZR for offset
2951 */
2952 static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
2953 int opc,
2954 int size,
2955 int rt,
2956 bool is_vector)
2957 {
2958 int rn = extract32(insn, 5, 5);
2959 int shift = extract32(insn, 12, 1);
2960 int rm = extract32(insn, 16, 5);
2961 int opt = extract32(insn, 13, 3);
2962 bool is_signed = false;
2963 bool is_store = false;
2964 bool is_extended = false;
2965
2966 TCGv_i64 tcg_rm, clean_addr, dirty_addr;
2967
2968 if (extract32(opt, 1, 1) == 0) {
2969 unallocated_encoding(s);
2970 return;
2971 }
2972
2973 if (is_vector) {
2974 size |= (opc & 2) << 1;
2975 if (size > 4) {
2976 unallocated_encoding(s);
2977 return;
2978 }
2979 is_store = !extract32(opc, 0, 1);
2980 if (!fp_access_check(s)) {
2981 return;
2982 }
2983 } else {
2984 if (size == 3 && opc == 2) {
2985 /* PRFM - prefetch */
2986 return;
2987 }
2988 if (opc == 3 && size > 1) {
2989 unallocated_encoding(s);
2990 return;
2991 }
2992 is_store = (opc == 0);
2993 is_signed = extract32(opc, 1, 1);
2994 is_extended = (size < 3) && extract32(opc, 0, 1);
2995 }
2996
2997 if (rn == 31) {
2998 gen_check_sp_alignment(s);
2999 }
3000 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3001
3002 tcg_rm = read_cpu_reg(s, rm, 1);
3003 ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
3004
3005 tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
3006 clean_addr = clean_data_tbi(s, dirty_addr);
3007
3008 if (is_vector) {
3009 if (is_store) {
3010 do_fp_st(s, rt, clean_addr, size);
3011 } else {
3012 do_fp_ld(s, rt, clean_addr, size);
3013 }
3014 } else {
3015 TCGv_i64 tcg_rt = cpu_reg(s, rt);
3016 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3017 if (is_store) {
3018 do_gpr_st(s, tcg_rt, clean_addr, size,
3019 true, rt, iss_sf, false);
3020 } else {
3021 do_gpr_ld(s, tcg_rt, clean_addr, size,
3022 is_signed, is_extended,
3023 true, rt, iss_sf, false);
3024 }
3025 }
3026 }
3027
3028 /*
3029 * Load/store (unsigned immediate)
3030 *
3031 * 31 30 29 27 26 25 24 23 22 21 10 9 5 4 0
3032 * +----+-------+---+-----+-----+------------+-------+------+
3033 * |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt |
3034 * +----+-------+---+-----+-----+------------+-------+------+
3035 *
3036 * For non-vector:
3037 * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
3038 * opc: 00 -> store, 01 -> load (unsigned), 10 -> load signed 64-bit, 11 -> load signed 32-bit
3039 * For vector:
3040 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
3041 * opc<0>: 0 -> store, 1 -> load
3042 * Rn: base address register (inc SP)
3043 * Rt: target register
3044 */
3045 static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
3046 int opc,
3047 int size,
3048 int rt,
3049 bool is_vector)
3050 {
3051 int rn = extract32(insn, 5, 5);
3052 unsigned int imm12 = extract32(insn, 10, 12);
3053 unsigned int offset;
3054
3055 TCGv_i64 clean_addr, dirty_addr;
3056
3057 bool is_store;
3058 bool is_signed = false;
3059 bool is_extended = false;
3060
3061 if (is_vector) {
3062 size |= (opc & 2) << 1;
3063 if (size > 4) {
3064 unallocated_encoding(s);
3065 return;
3066 }
3067 is_store = !extract32(opc, 0, 1);
3068 if (!fp_access_check(s)) {
3069 return;
3070 }
3071 } else {
3072 if (size == 3 && opc == 2) {
3073 /* PRFM - prefetch */
3074 return;
3075 }
3076 if (opc == 3 && size > 1) {
3077 unallocated_encoding(s);
3078 return;
3079 }
3080 is_store = (opc == 0);
3081 is_signed = extract32(opc, 1, 1);
3082 is_extended = (size < 3) && extract32(opc, 0, 1);
3083 }
3084
3085 if (rn == 31) {
3086 gen_check_sp_alignment(s);
3087 }
3088 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3089 offset = imm12 << size;
3090 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3091 clean_addr = clean_data_tbi(s, dirty_addr);
3092
3093 if (is_vector) {
3094 if (is_store) {
3095 do_fp_st(s, rt, clean_addr, size);
3096 } else {
3097 do_fp_ld(s, rt, clean_addr, size);
3098 }
3099 } else {
3100 TCGv_i64 tcg_rt = cpu_reg(s, rt);
3101 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3102 if (is_store) {
3103 do_gpr_st(s, tcg_rt, clean_addr, size,
3104 true, rt, iss_sf, false);
3105 } else {
3106 do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, is_extended,
3107 true, rt, iss_sf, false);
3108 }
3109 }
3110 }
3111
3112 /* Atomic memory operations
3113 *
3114 * 31 30 27 26 24 22 21 16 15 12 10 5 0
3115 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
3116 * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn | Rt |
3117 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
3118 *
3119 * Rt: the result register
3120 * Rn: base address or SP
3121 * Rs: the source register for the operation
3122 * V: vector flag (always 0 as of v8.3)
3123 * A: acquire flag
3124 * R: release flag
3125 */
3126 static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
3127 int size, int rt, bool is_vector)
3128 {
3129 int rs = extract32(insn, 16, 5);
3130 int rn = extract32(insn, 5, 5);
3131 int o3_opc = extract32(insn, 12, 4);
3132 bool r = extract32(insn, 22, 1);
3133 bool a = extract32(insn, 23, 1);
3134 TCGv_i64 tcg_rs, clean_addr;
3135 AtomicThreeOpFn *fn;
3136
3137 if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
3138 unallocated_encoding(s);
3139 return;
3140 }
3141 switch (o3_opc) {
3142 case 000: /* LDADD */
3143 fn = tcg_gen_atomic_fetch_add_i64;
3144 break;
3145 case 001: /* LDCLR */
3146 fn = tcg_gen_atomic_fetch_and_i64;
3147 break;
3148 case 002: /* LDEOR */
3149 fn = tcg_gen_atomic_fetch_xor_i64;
3150 break;
3151 case 003: /* LDSET */
3152 fn = tcg_gen_atomic_fetch_or_i64;
3153 break;
3154 case 004: /* LDSMAX */
3155 fn = tcg_gen_atomic_fetch_smax_i64;
3156 break;
3157 case 005: /* LDSMIN */
3158 fn = tcg_gen_atomic_fetch_smin_i64;
3159 break;
3160 case 006: /* LDUMAX */
3161 fn = tcg_gen_atomic_fetch_umax_i64;
3162 break;
3163 case 007: /* LDUMIN */
3164 fn = tcg_gen_atomic_fetch_umin_i64;
3165 break;
3166 case 010: /* SWP */
3167 fn = tcg_gen_atomic_xchg_i64;
3168 break;
3169 case 014: /* LDAPR, LDAPRH, LDAPRB */
3170 if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
3171 rs != 31 || a != 1 || r != 0) {
3172 unallocated_encoding(s);
3173 return;
3174 }
3175 break;
3176 default:
3177 unallocated_encoding(s);
3178 return;
3179 }
3180
3181 if (rn == 31) {
3182 gen_check_sp_alignment(s);
3183 }
3184 clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
3185
3186 if (o3_opc == 014) {
3187 /*
3188 * LDAPR* are a special case because they are a simple load, not a
3189 * fetch-and-do-something op.
3190 * The architectural consistency requirements here are weaker than
3191 * full load-acquire (we only need "load-acquire processor consistent"),
3192 * but we choose to implement them as full LDAQ.
3193 */
3194 do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false,
3195 true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
3196 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3197 return;
3198 }
3199
3200 tcg_rs = read_cpu_reg(s, rs, true);
3201
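/*
 * LDCLR is an atomic bit-clear (a fetch-and-BIC), so we implement it
 * as an atomic fetch_and with the complement of Rs.
 */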
3202 if (o3_opc == 1) { /* LDCLR */
3203 tcg_gen_not_i64(tcg_rs, tcg_rs);
3204 }
3205
3206 /* The tcg atomic primitives are all full barriers. Therefore we
3207 * can ignore the Acquire and Release bits of this instruction.
3208 */
3209 fn(cpu_reg(s, rt), clean_addr, tcg_rs, get_mem_index(s),
3210 s->be_data | size | MO_ALIGN);
3211 }
3212
3213 /*
3214 * PAC memory operations
3215 *
3216 * 31 30 27 26 24 22 21 12 11 10 5 0
3217 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
3218 * | size | 1 1 1 | V | 0 0 | M S | 1 | imm9 | W | 1 | Rn | Rt |
3219 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
3220 *
3221 * Rt: the result register
3222 * Rn: base address or SP
3223 * V: vector flag (always 0 as of v8.3)
3224 * M: clear for key DA, set for key DB
3225 * W: pre-indexing flag
3226 * S: sign (top) bit of the 10-bit offset formed from S:imm9.
3227 */
3228 static void disas_ldst_pac(DisasContext *s, uint32_t insn,
3229 int size, int rt, bool is_vector)
3230 {
3231 int rn = extract32(insn, 5, 5);
3232 bool is_wback = extract32(insn, 11, 1);
3233 bool use_key_a = !extract32(insn, 23, 1);
3234 int offset;
3235 TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3236
3237 if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
3238 unallocated_encoding(s);
3239 return;
3240 }
3241
3242 if (rn == 31) {
3243 gen_check_sp_alignment(s);
3244 }
3245 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3246
3247 if (s->pauth_active) {
3248 if (use_key_a) {
3249 gen_helper_autda(dirty_addr, cpu_env, dirty_addr, cpu_X[31]);
3250 } else {
3251 gen_helper_autdb(dirty_addr, cpu_env, dirty_addr, cpu_X[31]);
3252 }
3253 }
3254
3255 /* Form the 10-bit signed, scaled offset. */
3256 offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
3257 offset = sextract32(offset << size, 0, 10 + size);
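/*
 * For example (illustrative): with S = 1 and imm9 = 0x1ff the 10-bit
 * field is 0x3ff (i.e. -1), which scales by size (always 3 here) to a
 * byte offset of -8.
 */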
3258 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3259
3260 /* Note that "clean" and "dirty" here refer to TBI not PAC. */
3261 clean_addr = clean_data_tbi(s, dirty_addr);
3262
3263 tcg_rt = cpu_reg(s, rt);
3264 do_gpr_ld(s, tcg_rt, clean_addr, size, /* is_signed */ false,
3265 /* extend */ false, /* iss_valid */ !is_wback,
3266 /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);
3267
3268 if (is_wback) {
3269 tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
3270 }
3271 }
3272
3273 /*
3274 * LDAPR/STLR (unscaled immediate)
3275 *
3276 * 31 30 24 22 21 12 10 5 0
3277 * +------+-------------+-----+---+--------+-----+----+-----+
3278 * | size | 0 1 1 0 0 1 | opc | 0 | imm9 | 0 0 | Rn | Rt |
3279 * +------+-------------+-----+---+--------+-----+----+-----+
3280 *
3281 * Rt: source or destination register
3282 * Rn: base register
3283 * imm9: unscaled immediate offset
3284 * opc: 00: STLUR*, 01/10/11: various LDAPUR*
3285 * size: size of load/store
3286 */
3287 static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
3288 {
3289 int rt = extract32(insn, 0, 5);
3290 int rn = extract32(insn, 5, 5);
3291 int offset = sextract32(insn, 12, 9);
3292 int opc = extract32(insn, 22, 2);
3293 int size = extract32(insn, 30, 2);
3294 TCGv_i64 clean_addr, dirty_addr;
3295 bool is_store = false;
3296 bool is_signed = false;
3297 bool extend = false;
3298 bool iss_sf;
3299
3300 if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
3301 unallocated_encoding(s);
3302 return;
3303 }
3304
3305 switch (opc) {
3306 case 0: /* STLURB */
3307 is_store = true;
3308 break;
3309 case 1: /* LDAPUR* */
3310 break;
3311 case 2: /* LDAPURS* 64-bit variant */
3312 if (size == 3) {
3313 unallocated_encoding(s);
3314 return;
3315 }
3316 is_signed = true;
3317 break;
3318 case 3: /* LDAPURS* 32-bit variant */
3319 if (size > 1) {
3320 unallocated_encoding(s);
3321 return;
3322 }
3323 is_signed = true;
3324 extend = true; /* zero-extend 32->64 after signed load */
3325 break;
3326 default:
3327 g_assert_not_reached();
3328 }
3329
3330 iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3331
3332 if (rn == 31) {
3333 gen_check_sp_alignment(s);
3334 }
3335
3336 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3337 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3338 clean_addr = clean_data_tbi(s, dirty_addr);
3339
3340 if (is_store) {
3341 /* Store-Release semantics */
3342 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
3343 do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, iss_sf, true);
3344 } else {
3345 /*
3346 * Load-AcquirePC semantics; we implement as the slightly more
3347 * restrictive Load-Acquire.
3348 */
3349 do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, is_signed, extend,
3350 true, rt, iss_sf, true);
3351 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3352 }
3353 }
3354
3355 /* Load/store register (all forms) */
3356 static void disas_ldst_reg(DisasContext *s, uint32_t insn)
3357 {
3358 int rt = extract32(insn, 0, 5);
3359 int opc = extract32(insn, 22, 2);
3360 bool is_vector = extract32(insn, 26, 1);
3361 int size = extract32(insn, 30, 2);
3362
3363 switch (extract32(insn, 24, 2)) {
3364 case 0:
3365 if (extract32(insn, 21, 1) == 0) {
3366 /* Load/store register (unscaled immediate)
3367 * Load/store immediate pre/post-indexed
3368 * Load/store register unprivileged
3369 */
3370 disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
3371 return;
3372 }
3373 switch (extract32(insn, 10, 2)) {
3374 case 0:
3375 disas_ldst_atomic(s, insn, size, rt, is_vector);
3376 return;
3377 case 2:
3378 disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
3379 return;
3380 default:
3381 disas_ldst_pac(s, insn, size, rt, is_vector);
3382 return;
3383 }
3384 break;
3385 case 1:
3386 disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
3387 return;
3388 }
3389 unallocated_encoding(s);
3390 }
3391
3392 /* AdvSIMD load/store multiple structures
3393 *
3394 * 31 30 29 23 22 21 16 15 12 11 10 9 5 4 0
3395 * +---+---+---------------+---+-------------+--------+------+------+------+
3396 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size | Rn | Rt |
3397 * +---+---+---------------+---+-------------+--------+------+------+------+
3398 *
3399 * AdvSIMD load/store multiple structures (post-indexed)
3400 *
3401 * 31 30 29 23 22 21 20 16 15 12 11 10 9 5 4 0
3402 * +---+---+---------------+---+---+---------+--------+------+------+------+
3403 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 | Rm | opcode | size | Rn | Rt |
3404 * +---+---+---------------+---+---+---------+--------+------+------+------+
3405 *
3406 * Rt: first (or only) SIMD&FP register to be transferred
3407 * Rn: base address or SP
3408 * Rm (post-index only): post-index register (when not 31) or size-dependent #imm
3409 */
3410 static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
3411 {
3412 int rt = extract32(insn, 0, 5);
3413 int rn = extract32(insn, 5, 5);
3414 int rm = extract32(insn, 16, 5);
3415 int size = extract32(insn, 10, 2);
3416 int opcode = extract32(insn, 12, 4);
3417 bool is_store = !extract32(insn, 22, 1);
3418 bool is_postidx = extract32(insn, 23, 1);
3419 bool is_q = extract32(insn, 30, 1);
3420 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3421 MemOp endian = s->be_data;
3422
3423 int ebytes; /* bytes per element */
3424 int elements; /* elements per vector */
3425 int rpt; /* num iterations */
3426 int selem; /* structure elements */
3427 int r;
3428
3429 if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
3430 unallocated_encoding(s);
3431 return;
3432 }
3433
3434 if (!is_postidx && rm != 0) {
3435 unallocated_encoding(s);
3436 return;
3437 }
3438
3439 /* From the shared decode logic */
3440 switch (opcode) {
3441 case 0x0:
3442 rpt = 1;
3443 selem = 4;
3444 break;
3445 case 0x2:
3446 rpt = 4;
3447 selem = 1;
3448 break;
3449 case 0x4:
3450 rpt = 1;
3451 selem = 3;
3452 break;
3453 case 0x6:
3454 rpt = 3;
3455 selem = 1;
3456 break;
3457 case 0x7:
3458 rpt = 1;
3459 selem = 1;
3460 break;
3461 case 0x8:
3462 rpt = 1;
3463 selem = 2;
3464 break;
3465 case 0xa:
3466 rpt = 2;
3467 selem = 1;
3468 break;
3469 default:
3470 unallocated_encoding(s);
3471 return;
3472 }
3473
3474 if (size == 3 && !is_q && selem != 1) {
3475 /* reserved */
3476 unallocated_encoding(s);
3477 return;
3478 }
3479
3480 if (!fp_access_check(s)) {
3481 return;
3482 }
3483
3484 if (rn == 31) {
3485 gen_check_sp_alignment(s);
3486 }
3487
3488 /* For our purposes, bytes are always little-endian. */
3489 if (size == 0) {
3490 endian = MO_LE;
3491 }
3492
3493 /* Consecutive little-endian elements from a single register
3494 * can be promoted to a larger little-endian operation.
3495 */
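/*
 * For example (illustrative): LD1 {v0.16b}, [x0] can be performed as
 * two 64-bit loads instead of sixteen byte loads, whereas LD4 (selem
 * == 4) must keep its byte-sized elements.
 */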
3496 if (selem == 1 && endian == MO_LE) {
3497 size = 3;
3498 }
3499 ebytes = 1 << size;
3500 elements = (is_q ? 16 : 8) / ebytes;
3501
3502 tcg_rn = cpu_reg_sp(s, rn);
3503 clean_addr = clean_data_tbi(s, tcg_rn);
3504 tcg_ebytes = tcg_const_i64(ebytes);
3505
3506 for (r = 0; r < rpt; r++) {
3507 int e;
3508 for (e = 0; e < elements; e++) {
3509 int xs;
3510 for (xs = 0; xs < selem; xs++) {
3511 int tt = (rt + r + xs) % 32;
3512 if (is_store) {
3513 do_vec_st(s, tt, e, clean_addr, size, endian);
3514 } else {
3515 do_vec_ld(s, tt, e, clean_addr, size, endian);
3516 }
3517 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3518 }
3519 }
3520 }
3521 tcg_temp_free_i64(tcg_ebytes);
3522
3523 if (!is_store) {
3524 /* For non-quad operations, setting a slice of the low
3525 * 64 bits of the register clears the high 64 bits (in
3526 * the ARM ARM pseudocode this is implicit in the fact
3527 * that 'rval' is a 64 bit wide variable).
3528 * For quad operations, we might still need to zero the
3529 * high bits of the SVE register.
3530 */
3531 for (r = 0; r < rpt * selem; r++) {
3532 int tt = (rt + r) % 32;
3533 clear_vec_high(s, is_q, tt);
3534 }
3535 }
3536
3537 if (is_postidx) {
3538 if (rm == 31) {
3539 tcg_gen_addi_i64(tcg_rn, tcg_rn, rpt * elements * selem * ebytes);
3540 } else {
3541 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
3542 }
3543 }
3544 }
3545
3546 /* AdvSIMD load/store single structure
3547 *
3548 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
3549 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3550 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt |
3551 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3552 *
3553 * AdvSIMD load/store single structure (post-indexed)
3554 *
3555 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
3556 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3557 * | 0 | Q | 0 0 1 1 0 1 1 | L R | Rm | opc | S | size | Rn | Rt |
3558 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3559 *
3560 * Rt: first (or only) SIMD&FP register to be transferred
3561 * Rn: base address or SP
3562 * Rm (post-index only): post-index register (when not 31) or size-dependent #imm
3563 * index: encoded in Q:S:size; how it is unpacked depends on the element size
3564 *
3565 * lane_size = encoded in R, opc
3566 * transfer width = encoded in opc, S, size
3567 */
3568 static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
3569 {
3570 int rt = extract32(insn, 0, 5);
3571 int rn = extract32(insn, 5, 5);
3572 int rm = extract32(insn, 16, 5);
3573 int size = extract32(insn, 10, 2);
3574 int S = extract32(insn, 12, 1);
3575 int opc = extract32(insn, 13, 3);
3576 int R = extract32(insn, 21, 1);
3577 int is_load = extract32(insn, 22, 1);
3578 int is_postidx = extract32(insn, 23, 1);
3579 int is_q = extract32(insn, 30, 1);
3580
3581 int scale = extract32(opc, 1, 2);
3582 int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
3583 bool replicate = false;
3584 int index = is_q << 3 | S << 2 | size;
3585 int ebytes, xs;
3586 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3587
3588 if (extract32(insn, 31, 1)) {
3589 unallocated_encoding(s);
3590 return;
3591 }
3592 if (!is_postidx && rm != 0) {
3593 unallocated_encoding(s);
3594 return;
3595 }
3596
3597 switch (scale) {
3598 case 3:
3599 if (!is_load || S) {
3600 unallocated_encoding(s);
3601 return;
3602 }
3603 scale = size;
3604 replicate = true;
3605 break;
3606 case 0:
3607 break;
3608 case 1:
3609 if (extract32(size, 0, 1)) {
3610 unallocated_encoding(s);
3611 return;
3612 }
3613 index >>= 1;
3614 break;
3615 case 2:
3616 if (extract32(size, 1, 1)) {
3617 unallocated_encoding(s);
3618 return;
3619 }
3620 if (!extract32(size, 0, 1)) {
3621 index >>= 2;
3622 } else {
3623 if (S) {
3624 unallocated_encoding(s);
3625 return;
3626 }
3627 index >>= 3;
3628 scale = 3;
3629 }
3630 break;
3631 default:
3632 g_assert_not_reached();
3633 }
3634
3635 if (!fp_access_check(s)) {
3636 return;
3637 }
3638
3639 ebytes = 1 << scale;
3640
3641 if (rn == 31) {
3642 gen_check_sp_alignment(s);
3643 }
3644
3645 tcg_rn = cpu_reg_sp(s, rn);
3646 clean_addr = clean_data_tbi(s, tcg_rn);
3647 tcg_ebytes = tcg_const_i64(ebytes);
3648
3649 for (xs = 0; xs < selem; xs++) {
3650 if (replicate) {
3651 /* Load and replicate to all elements */
3652 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
3653
3654 tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr,
3655 get_mem_index(s), s->be_data + scale);
3656 tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
3657 (is_q + 1) * 8, vec_full_reg_size(s),
3658 tcg_tmp);
3659 tcg_temp_free_i64(tcg_tmp);
3660 } else {
3661 /* Load/store one element per register */
3662 if (is_load) {
3663 do_vec_ld(s, rt, index, clean_addr, scale, s->be_data);
3664 } else {
3665 do_vec_st(s, rt, index, clean_addr, scale, s->be_data);
3666 }
3667 }
3668 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3669 rt = (rt + 1) % 32;
3670 }
3671 tcg_temp_free_i64(tcg_ebytes);
3672
3673 if (is_postidx) {
3674 if (rm == 31) {
3675 tcg_gen_addi_i64(tcg_rn, tcg_rn, selem * ebytes);
3676 } else {
3677 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
3678 }
3679 }
3680 }
3681
3682 /* Loads and stores */
3683 static void disas_ldst(DisasContext *s, uint32_t insn)
3684 {
3685 switch (extract32(insn, 24, 6)) {
3686 case 0x08: /* Load/store exclusive */
3687 disas_ldst_excl(s, insn);
3688 break;
3689 case 0x18: case 0x1c: /* Load register (literal) */
3690 disas_ld_lit(s, insn);
3691 break;
3692 case 0x28: case 0x29:
3693 case 0x2c: case 0x2d: /* Load/store pair (all forms) */
3694 disas_ldst_pair(s, insn);
3695 break;
3696 case 0x38: case 0x39:
3697 case 0x3c: case 0x3d: /* Load/store register (all forms) */
3698 disas_ldst_reg(s, insn);
3699 break;
3700 case 0x0c: /* AdvSIMD load/store multiple structures */
3701 disas_ldst_multiple_struct(s, insn);
3702 break;
3703 case 0x0d: /* AdvSIMD load/store single structure */
3704 disas_ldst_single_struct(s, insn);
3705 break;
3706 case 0x19: /* LDAPR/STLR (unscaled immediate) */
3707 if (extract32(insn, 10, 2) != 0 ||
3708 extract32(insn, 21, 1) != 0) {
3709 unallocated_encoding(s);
3710 break;
3711 }
3712 disas_ldst_ldapr_stlr(s, insn);
3713 break;
3714 default:
3715 unallocated_encoding(s);
3716 break;
3717 }
3718 }
3719
3720 /* PC-rel. addressing
3721 * 31 30 29 28 24 23 5 4 0
3722 * +----+-------+-----------+-------------------+------+
3723 * | op | immlo | 1 0 0 0 0 | immhi | Rd |
3724 * +----+-------+-----------+-------------------+------+
3725 */
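/*
 * For example (illustrative): at PC = 0x401234, ADRP with an immediate
 * of 1 yields (0x401234 & ~0xfff) + (1 << 12) = 0x402000.
 */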
3726 static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
3727 {
3728 unsigned int page, rd;
3729 uint64_t base;
3730 uint64_t offset;
3731
3732 page = extract32(insn, 31, 1);
3733 /* SignExtend(immhi:immlo) -> offset */
3734 offset = sextract64(insn, 5, 19);
3735 offset = offset << 2 | extract32(insn, 29, 2);
3736 rd = extract32(insn, 0, 5);
3737 base = s->pc_curr;
3738
3739 if (page) {
3740 /* ADRP (page based) */
3741 base &= ~0xfff;
3742 offset <<= 12;
3743 }
3744
3745 tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
3746 }
3747
3748 /*
3749 * Add/subtract (immediate)
3750 *
3751 * 31 30 29 28 24 23 22 21 10 9 5 4 0
3752 * +--+--+--+-----------+-----+-------------+-----+-----+
3753 * |sf|op| S| 1 0 0 0 1 |shift| imm12 | Rn | Rd |
3754 * +--+--+--+-----------+-----+-------------+-----+-----+
3755 *
3756 * sf: 0 -> 32bit, 1 -> 64bit
3757 * op: 0 -> add , 1 -> sub
3758 * S: 1 -> set flags
3759 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
3760 */
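/*
 * For example (illustrative): ADD X0, X1, #0x10, LSL #12 encodes
 * shift = 01 and imm12 = 0x10, adding 0x10000 to X1.
 */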
3761 static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
3762 {
3763 int rd = extract32(insn, 0, 5);
3764 int rn = extract32(insn, 5, 5);
3765 uint64_t imm = extract32(insn, 10, 12);
3766 int shift = extract32(insn, 22, 2);
3767 bool setflags = extract32(insn, 29, 1);
3768 bool sub_op = extract32(insn, 30, 1);
3769 bool is_64bit = extract32(insn, 31, 1);
3770
3771 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
3772 TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
3773 TCGv_i64 tcg_result;
3774
3775 switch (shift) {
3776 case 0x0:
3777 break;
3778 case 0x1:
3779 imm <<= 12;
3780 break;
3781 default:
3782 unallocated_encoding(s);
3783 return;
3784 }
3785
3786 tcg_result = tcg_temp_new_i64();
3787 if (!setflags) {
3788 if (sub_op) {
3789 tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
3790 } else {
3791 tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
3792 }
3793 } else {
3794 TCGv_i64 tcg_imm = tcg_const_i64(imm);
3795 if (sub_op) {
3796 gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
3797 } else {
3798 gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
3799 }
3800 tcg_temp_free_i64(tcg_imm);
3801 }
3802
3803 if (is_64bit) {
3804 tcg_gen_mov_i64(tcg_rd, tcg_result);
3805 } else {
3806 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3807 }
3808
3809 tcg_temp_free_i64(tcg_result);
3810 }
3811
3812 /* The input should be a value in the bottom e bits (with higher
3813 * bits zero); returns that value replicated into every element
3814 * of size e in a 64 bit integer.
3815 */
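/* For example: bitfield_replicate(0x3, 4) == 0x3333333333333333ull. */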
3816 static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
3817 {
3818 assert(e != 0);
3819 while (e < 64) {
3820 mask |= mask << e;
3821 e *= 2;
3822 }
3823 return mask;
3824 }
3825
3826 /* Return a value with the bottom len bits set (where 0 < len <= 64) */
3827 static inline uint64_t bitmask64(unsigned int length)
3828 {
3829 assert(length > 0 && length <= 64);
3830 return ~0ULL >> (64 - length);
3831 }
3832
3833 /* Simplified variant of pseudocode DecodeBitMasks() for the case where we
3834 * only require the wmask. Returns false if the imms/immr/immn are a reserved
3835 * value (ie should cause a guest UNDEF exception), and true if they are
3836 * valid, in which case the decoded bit pattern is written to result.
3837 */
3838 bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
3839 unsigned int imms, unsigned int immr)
3840 {
3841 uint64_t mask;
3842 unsigned e, levels, s, r;
3843 int len;
3844
3845 assert(immn < 2 && imms < 64 && immr < 64);
3846
3847 /* The bit patterns we create here are 64 bit patterns which
3848 * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
3849 * 64 bits each. Each element contains the same value: a run
3850 * of between 1 and e-1 non-zero bits, rotated within the
3851 * element by between 0 and e-1 bits.
3852 *
3853 * The element size and run length are encoded into immn (1 bit)
3854 * and imms (6 bits) as follows:
3855 * 64 bit elements: immn = 1, imms = <length of run - 1>
3856 * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
3857 * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
3858 * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
3859 * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
3860 * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
3861 * Notice that immn = 0, imms = 11111x is the only combination
3862 * not covered by one of the above options; this is reserved.
3863 * Further, <length of run - 1> all-ones is a reserved pattern.
3864 *
3865 * In all cases the rotation is by immr % e (and immr is 6 bits).
3866 */
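/*
 * Worked example (illustrative): immn = 0, imms = 0b111100, immr = 0
 * selects 2-bit elements containing a single set bit, i.e. the pattern
 * 0x5555555555555555.
 */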
3867
3868 /* First determine the element size */
3869 len = 31 - clz32((immn << 6) | (~imms & 0x3f));
3870 if (len < 1) {
3871 /* This is the immn == 0, imms == 0b11111x case */
3872 return false;
3873 }
3874 e = 1 << len;
3875
3876 levels = e - 1;
3877 s = imms & levels;
3878 r = immr & levels;
3879
3880 if (s == levels) {
3881 /* <length of run - 1> mustn't be all-ones. */
3882 return false;
3883 }
3884
3885 /* Create the value of one element: s+1 set bits rotated
3886 * by r within the element (which is e bits wide)...
3887 */
3888 mask = bitmask64(s + 1);
3889 if (r) {
3890 mask = (mask >> r) | (mask << (e - r));
3891 mask &= bitmask64(e);
3892 }
3893 /* ...then replicate the element over the whole 64 bit value */
3894 mask = bitfield_replicate(mask, e);
3895 *result = mask;
3896 return true;
3897 }
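/* Worked example of the decode above (illustrative field values, not
 * tied to a specific guest instruction): immn = 0, imms = 0b100101,
 * immr = 3. Then (immn << 6) | (~imms & 0x3f) = 0b011010, so len = 4
 * and e = 16; s = imms & 15 = 5 (a run of six ones) and
 * r = immr & 15 = 3. bitmask64(6) = 0x3f, rotated right by 3 within
 * 16 bits gives 0xe007, and replication yields
 * *result = 0xe007e007e007e007.
 */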
3898
3899 /* Logical (immediate)
3900 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
3901 * +----+-----+-------------+---+------+------+------+------+
3902 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
3903 * +----+-----+-------------+---+------+------+------+------+
3904 */
3905 static void disas_logic_imm(DisasContext *s, uint32_t insn)
3906 {
3907 unsigned int sf, opc, is_n, immr, imms, rn, rd;
3908 TCGv_i64 tcg_rd, tcg_rn;
3909 uint64_t wmask;
3910 bool is_and = false;
3911
3912 sf = extract32(insn, 31, 1);
3913 opc = extract32(insn, 29, 2);
3914 is_n = extract32(insn, 22, 1);
3915 immr = extract32(insn, 16, 6);
3916 imms = extract32(insn, 10, 6);
3917 rn = extract32(insn, 5, 5);
3918 rd = extract32(insn, 0, 5);
3919
3920 if (!sf && is_n) {
3921 unallocated_encoding(s);
3922 return;
3923 }
3924
3925 if (opc == 0x3) { /* ANDS */
3926 tcg_rd = cpu_reg(s, rd);
3927 } else {
3928 tcg_rd = cpu_reg_sp(s, rd);
3929 }
3930 tcg_rn = cpu_reg(s, rn);
3931
3932 if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
3933 /* some immediate field values are reserved */
3934 unallocated_encoding(s);
3935 return;
3936 }
3937
3938 if (!sf) {
3939 wmask &= 0xffffffff;
3940 }
3941
3942 switch (opc) {
3943 case 0x3: /* ANDS */
3944 case 0x0: /* AND */
3945 tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
3946 is_and = true;
3947 break;
3948 case 0x1: /* ORR */
3949 tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
3950 break;
3951 case 0x2: /* EOR */
3952 tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
3953 break;
3954 default:
3955 assert(FALSE); /* must handle all above */
3956 break;
3957 }
3958
3959 if (!sf && !is_and) {
3960 /* zero extend final result; we know we can skip this for AND
3961 * since the immediate had the high 32 bits clear.
3962 */
3963 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3964 }
3965
3966 if (opc == 3) { /* ANDS */
3967 gen_logic_CC(sf, tcg_rd);
3968 }
3969 }
3970
3971 /*
3972 * Move wide (immediate)
3973 *
3974 * 31 30 29 28 23 22 21 20 5 4 0
3975 * +--+-----+-------------+-----+----------------+------+
3976 * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
3977 * +--+-----+-------------+-----+----------------+------+
3978 *
3979 * sf: 0 -> 32 bit, 1 -> 64 bit
3980 * opc: 00 -> MOVN, 10 -> MOVZ, 11 -> MOVK (01 is unallocated)
3981 * hw: shift/16 (0 or 16; 32 and 48 are valid only when sf == 1)
3982 */
3983 static void disas_movw_imm(DisasContext *s, uint32_t insn)
3984 {
3985 int rd = extract32(insn, 0, 5);
3986 uint64_t imm = extract32(insn, 5, 16);
3987 int sf = extract32(insn, 31, 1);
3988 int opc = extract32(insn, 29, 2);
3989 int pos = extract32(insn, 21, 2) << 4;
3990 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3991 TCGv_i64 tcg_imm;
3992
3993 if (!sf && (pos >= 32)) {
3994 unallocated_encoding(s);
3995 return;
3996 }
3997
3998 switch (opc) {
3999 case 0: /* MOVN */
4000 case 2: /* MOVZ */
4001 imm <<= pos;
4002 if (opc == 0) {
4003 imm = ~imm;
4004 }
4005 if (!sf) {
4006 imm &= 0xffffffffu;
4007 }
4008 tcg_gen_movi_i64(tcg_rd, imm);
4009 break;
4010 case 3: /* MOVK */
4011 tcg_imm = tcg_const_i64(imm);
4012 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
4013 tcg_temp_free_i64(tcg_imm);
4014 if (!sf) {
4015 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4016 }
4017 break;
4018 default:
4019 unallocated_encoding(s);
4020 break;
4021 }
4022 }
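/* Example translation (assumed operand values, for illustration):
 * MOVZ w0, #0xbeef has opc = 2, hw = 0, so we emit
 * tcg_gen_movi_i64(x0, 0xbeef). A following MOVK w0, #0xdead, LSL #16
 * (opc = 3, pos = 16) deposits 16 bits at position 16 and
 * zero-extends, leaving the low word of x0 equal to 0xdeadbeef.
 */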
4023
4024 /* Bitfield
4025 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
4026 * +----+-----+-------------+---+------+------+------+------+
4027 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
4028 * +----+-----+-------------+---+------+------+------+------+
4029 */
4030 static void disas_bitfield(DisasContext *s, uint32_t insn)
4031 {
4032 unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
4033 TCGv_i64 tcg_rd, tcg_tmp;
4034
4035 sf = extract32(insn, 31, 1);
4036 opc = extract32(insn, 29, 2);
4037 n = extract32(insn, 22, 1);
4038 ri = extract32(insn, 16, 6);
4039 si = extract32(insn, 10, 6);
4040 rn = extract32(insn, 5, 5);
4041 rd = extract32(insn, 0, 5);
4042 bitsize = sf ? 64 : 32;
4043
4044 if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
4045 unallocated_encoding(s);
4046 return;
4047 }
4048
4049 tcg_rd = cpu_reg(s, rd);
4050
4051 /* Suppress the zero-extend for !sf. Since RI and SI are constrained
4052 to be smaller than bitsize, we'll never reference data outside the
4053 low 32-bits anyway. */
4054 tcg_tmp = read_cpu_reg(s, rn, 1);
4055
4056 /* Recognize simple(r) extractions. */
4057 if (si >= ri) {
4058 /* Wd<s-r:0> = Wn<s:r> */
4059 len = (si - ri) + 1;
4060 if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
4061 tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
4062 goto done;
4063 } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
4064 tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
4065 return;
4066 }
4067 /* opc == 1, BFXIL: fall through to the deposit below */
4068 tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
4069 pos = 0;
4070 } else {
4071 /* Handle the ri > si case with a deposit
4072 * Wd<32+s-r,32-r> = Wn<s:0>
4073 */
4074 len = si + 1;
4075 pos = (bitsize - ri) & (bitsize - 1);
4076 }
4077
4078 if (opc == 0 && len < ri) {
4079 /* SBFM: sign extend the destination field from len to fill
4080 the balance of the word. Let the deposit below insert all
4081 of those sign bits. */
4082 tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
4083 len = ri;
4084 }
4085
4086 if (opc == 1) { /* BFM, BFXIL */
4087 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
4088 } else {
4089 /* SBFM or UBFM: We start with zero, and we haven't modified
4090 any bits outside bitsize, therefore the zero-extension
4091 below is unneeded. */
4092 tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
4093 return;
4094 }
4095
4096 done:
4097 if (!sf) { /* zero extend final result */
4098 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4099 }
4100 }
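/* Example (hypothetical operands): UBFX w0, w1, #4, #8 encodes
 * ri = 4, si = 11. Since si >= ri we take the extract fast path with
 * len = 8, and tcg_gen_extract_i64 pulls bits <11:4> of w1 into w0
 * already zero-extended, so no further masking is needed.
 */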
4101
4102 /* Extract
4103 * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
4104 * +----+------+-------------+---+----+------+--------+------+------+
4105 * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
4106 * +----+------+-------------+---+----+------+--------+------+------+
4107 */
4108 static void disas_extract(DisasContext *s, uint32_t insn)
4109 {
4110 unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
4111
4112 sf = extract32(insn, 31, 1);
4113 n = extract32(insn, 22, 1);
4114 rm = extract32(insn, 16, 5);
4115 imm = extract32(insn, 10, 6);
4116 rn = extract32(insn, 5, 5);
4117 rd = extract32(insn, 0, 5);
4118 op21 = extract32(insn, 29, 2);
4119 op0 = extract32(insn, 21, 1);
4120 bitsize = sf ? 64 : 32;
4121
4122 if (sf != n || op21 || op0 || imm >= bitsize) {
4123 unallocated_encoding(s);
4124 } else {
4125 TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
4126
4127 tcg_rd = cpu_reg(s, rd);
4128
4129 if (unlikely(imm == 0)) {
4130 /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
4131 * so an extract from bit 0 is a special case.
4132 */
4133 if (sf) {
4134 tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
4135 } else {
4136 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
4137 }
4138 } else {
4139 tcg_rm = cpu_reg(s, rm);
4140 tcg_rn = cpu_reg(s, rn);
4141
4142 if (sf) {
4143 /* Specialization to ROR happens in EXTRACT2. */
4144 tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm);
4145 } else {
4146 TCGv_i32 t0 = tcg_temp_new_i32();
4147
4148 tcg_gen_extrl_i64_i32(t0, tcg_rm);
4149 if (rm == rn) {
4150 tcg_gen_rotri_i32(t0, t0, imm);
4151 } else {
4152 TCGv_i32 t1 = tcg_temp_new_i32();
4153 tcg_gen_extrl_i64_i32(t1, tcg_rn);
4154 tcg_gen_extract2_i32(t0, t0, t1, imm);
4155 tcg_temp_free_i32(t1);
4156 }
4157 tcg_gen_extu_i32_i64(tcg_rd, t0);
4158 tcg_temp_free_i32(t0);
4159 }
4160 }
4161 }
4162 }
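/* Example (illustrative): EXTR w0, w1, w1, #8 is the preferred form
 * of ROR w0, w1, #8; because rm == rn, the 32-bit path above emits a
 * single tcg_gen_rotri_i32 instead of a two-register extract2.
 */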
4163
4164 /* Data processing - immediate */
4165 static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
4166 {
4167 switch (extract32(insn, 23, 6)) {
4168 case 0x20: case 0x21: /* PC-rel. addressing */
4169 disas_pc_rel_adr(s, insn);
4170 break;
4171 case 0x22: case 0x23: /* Add/subtract (immediate) */
4172 disas_add_sub_imm(s, insn);
4173 break;
4174 case 0x24: /* Logical (immediate) */
4175 disas_logic_imm(s, insn);
4176 break;
4177 case 0x25: /* Move wide (immediate) */
4178 disas_movw_imm(s, insn);
4179 break;
4180 case 0x26: /* Bitfield */
4181 disas_bitfield(s, insn);
4182 break;
4183 case 0x27: /* Extract */
4184 disas_extract(s, insn);
4185 break;
4186 default:
4187 unallocated_encoding(s);
4188 break;
4189 }
4190 }
4191
4192 /* Shift a TCGv src by TCGv shift_amount, put result in dst.
4193 * Note that it is the caller's responsibility to ensure that the
4194 * shift amount is in range (ie 0..31 or 0..63) and to have applied
4195 * the ARM-mandated semantics for out-of-range shifts beforehand.
4196 */
4197 static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
4198 enum a64_shift_type shift_type, TCGv_i64 shift_amount)
4199 {
4200 switch (shift_type) {
4201 case A64_SHIFT_TYPE_LSL:
4202 tcg_gen_shl_i64(dst, src, shift_amount);
4203 break;
4204 case A64_SHIFT_TYPE_LSR:
4205 tcg_gen_shr_i64(dst, src, shift_amount);
4206 break;
4207 case A64_SHIFT_TYPE_ASR:
4208 if (!sf) {
4209 tcg_gen_ext32s_i64(dst, src);
4210 }
4211 tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
4212 break;
4213 case A64_SHIFT_TYPE_ROR:
4214 if (sf) {
4215 tcg_gen_rotr_i64(dst, src, shift_amount);
4216 } else {
4217 TCGv_i32 t0, t1;
4218 t0 = tcg_temp_new_i32();
4219 t1 = tcg_temp_new_i32();
4220 tcg_gen_extrl_i64_i32(t0, src);
4221 tcg_gen_extrl_i64_i32(t1, shift_amount);
4222 tcg_gen_rotr_i32(t0, t0, t1);
4223 tcg_gen_extu_i32_i64(dst, t0);
4224 tcg_temp_free_i32(t0);
4225 tcg_temp_free_i32(t1);
4226 }
4227 break;
4228 default:
4229 assert(FALSE); /* all shift types should be handled */
4230 break;
4231 }
4232
4233 if (!sf) { /* zero extend final result */
4234 tcg_gen_ext32u_i64(dst, dst);
4235 }
4236 }
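/* Example of why the !sf ASR path sign-extends first (values are
 * illustrative): with src = 0x80000000 and a shift of 4, the guest
 * expects the 32-bit result 0xf8000000. Extending to
 * 0xffffffff80000000 before the 64-bit sar produces exactly that in
 * the low word, whereas sar on the unextended value would give
 * 0x08000000.
 */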
4237
4238 /* Shift a TCGv src by immediate, put result in dst.
4239 * The shift amount must be in range (this should always be true as the
4240 * relevant instructions will UNDEF on bad shift immediates).
4241 */
4242 static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
4243 enum a64_shift_type shift_type, unsigned int shift_i)
4244 {
4245 assert(shift_i < (sf ? 64 : 32));
4246
4247 if (shift_i == 0) {
4248 tcg_gen_mov_i64(dst, src);
4249 } else {
4250 TCGv_i64 shift_const;
4251
4252 shift_const = tcg_const_i64(shift_i);
4253 shift_reg(dst, src, sf, shift_type, shift_const);
4254 tcg_temp_free_i64(shift_const);
4255 }
4256 }
4257
4258 /* Logical (shifted register)
4259 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
4260 * +----+-----+-----------+-------+---+------+--------+------+------+
4261 * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
4262 * +----+-----+-----------+-------+---+------+--------+------+------+
4263 */
4264 static void disas_logic_reg(DisasContext *s, uint32_t insn)
4265 {
4266 TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
4267 unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
4268
4269 sf = extract32(insn, 31, 1);
4270 opc = extract32(insn, 29, 2);
4271 shift_type = extract32(insn, 22, 2);
4272 invert = extract32(insn, 21, 1);
4273 rm = extract32(insn, 16, 5);
4274 shift_amount = extract32(insn, 10, 6);
4275 rn = extract32(insn, 5, 5);
4276 rd = extract32(insn, 0, 5);
4277
4278 if (!sf && (shift_amount & (1 << 5))) {
4279 unallocated_encoding(s);
4280 return;
4281 }
4282
4283 tcg_rd = cpu_reg(s, rd);
4284
4285 if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
4286 /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
4287 * register-register MOV and MVN, so it is worth special casing.
4288 */
4289 tcg_rm = cpu_reg(s, rm);
4290 if (invert) {
4291 tcg_gen_not_i64(tcg_rd, tcg_rm);
4292 if (!sf) {
4293 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4294 }
4295 } else {
4296 if (sf) {
4297 tcg_gen_mov_i64(tcg_rd, tcg_rm);
4298 } else {
4299 tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
4300 }
4301 }
4302 return;
4303 }
4304
4305 tcg_rm = read_cpu_reg(s, rm, sf);
4306
4307 if (shift_amount) {
4308 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
4309 }
4310
4311 tcg_rn = cpu_reg(s, rn);
4312
4313 switch (opc | (invert << 2)) {
4314 case 0: /* AND */
4315 case 3: /* ANDS */
4316 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
4317 break;
4318 case 1: /* ORR */
4319 tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
4320 break;
4321 case 2: /* EOR */
4322 tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
4323 break;
4324 case 4: /* BIC */
4325 case 7: /* BICS */
4326 tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
4327 break;
4328 case 5: /* ORN */
4329 tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
4330 break;
4331 case 6: /* EON */
4332 tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
4333 break;
4334 default:
4335 assert(FALSE);
4336 break;
4337 }
4338
4339 if (!sf) {
4340 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4341 }
4342
4343 if (opc == 3) {
4344 gen_logic_CC(sf, tcg_rd);
4345 }
4346 }
4347
4348 /*
4349 * Add/subtract (extended register)
4350 *
4351 * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
4352 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4353 * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
4354 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4355 *
4356 * sf: 0 -> 32bit, 1 -> 64bit
4357 * op: 0 -> add , 1 -> sub
4358 * S: 1 -> set flags
4359 * opt: 00
4360 * option: extension type (see DecodeRegExtend)
4361 * imm3: optional shift to Rm
4362 *
4363 * Rd = Rn + LSL(extend(Rm), amount)
4364 */
4365 static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
4366 {
4367 int rd = extract32(insn, 0, 5);
4368 int rn = extract32(insn, 5, 5);
4369 int imm3 = extract32(insn, 10, 3);
4370 int option = extract32(insn, 13, 3);
4371 int rm = extract32(insn, 16, 5);
4372 int opt = extract32(insn, 22, 2);
4373 bool setflags = extract32(insn, 29, 1);
4374 bool sub_op = extract32(insn, 30, 1);
4375 bool sf = extract32(insn, 31, 1);
4376
4377 TCGv_i64 tcg_rm, tcg_rn; /* temps */
4378 TCGv_i64 tcg_rd;
4379 TCGv_i64 tcg_result;
4380
4381 if (imm3 > 4 || opt != 0) {
4382 unallocated_encoding(s);
4383 return;
4384 }
4385
4386 /* non-flag setting ops may use SP */
4387 if (!setflags) {
4388 tcg_rd = cpu_reg_sp(s, rd);
4389 } else {
4390 tcg_rd = cpu_reg(s, rd);
4391 }
4392 tcg_rn = read_cpu_reg_sp(s, rn, sf);
4393
4394 tcg_rm = read_cpu_reg(s, rm, sf);
4395 ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
4396
4397 tcg_result = tcg_temp_new_i64();
4398
4399 if (!setflags) {
4400 if (sub_op) {
4401 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
4402 } else {
4403 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
4404 }
4405 } else {
4406 if (sub_op) {
4407 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
4408 } else {
4409 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
4410 }
4411 }
4412
4413 if (sf) {
4414 tcg_gen_mov_i64(tcg_rd, tcg_result);
4415 } else {
4416 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4417 }
4418
4419 tcg_temp_free_i64(tcg_result);
4420 }
4421
4422 /*
4423 * Add/subtract (shifted register)
4424 *
4425 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
4426 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4427 * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
4428 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4429 *
4430 * sf: 0 -> 32bit, 1 -> 64bit
4431 * op: 0 -> add , 1 -> sub
4432 * S: 1 -> set flags
4433 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
4434 * imm6: Shift amount to apply to Rm before the add/sub
4435 */
4436 static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
4437 {
4438 int rd = extract32(insn, 0, 5);
4439 int rn = extract32(insn, 5, 5);
4440 int imm6 = extract32(insn, 10, 6);
4441 int rm = extract32(insn, 16, 5);
4442 int shift_type = extract32(insn, 22, 2);
4443 bool setflags = extract32(insn, 29, 1);
4444 bool sub_op = extract32(insn, 30, 1);
4445 bool sf = extract32(insn, 31, 1);
4446
4447 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4448 TCGv_i64 tcg_rn, tcg_rm;
4449 TCGv_i64 tcg_result;
4450
4451 if ((shift_type == 3) || (!sf && (imm6 > 31))) {
4452 unallocated_encoding(s);
4453 return;
4454 }
4455
4456 tcg_rn = read_cpu_reg(s, rn, sf);
4457 tcg_rm = read_cpu_reg(s, rm, sf);
4458
4459 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
4460
4461 tcg_result = tcg_temp_new_i64();
4462
4463 if (!setflags) {
4464 if (sub_op) {
4465 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
4466 } else {
4467 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
4468 }
4469 } else {
4470 if (sub_op) {
4471 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
4472 } else {
4473 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
4474 }
4475 }
4476
4477 if (sf) {
4478 tcg_gen_mov_i64(tcg_rd, tcg_result);
4479 } else {
4480 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4481 }
4482
4483 tcg_temp_free_i64(tcg_result);
4484 }
4485
4486 /* Data-processing (3 source)
4487 *
4488 * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
4489 * +--+------+-----------+------+------+----+------+------+------+
4490 * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
4491 * +--+------+-----------+------+------+----+------+------+------+
4492 */
4493 static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
4494 {
4495 int rd = extract32(insn, 0, 5);
4496 int rn = extract32(insn, 5, 5);
4497 int ra = extract32(insn, 10, 5);
4498 int rm = extract32(insn, 16, 5);
4499 int op_id = (extract32(insn, 29, 3) << 4) |
4500 (extract32(insn, 21, 3) << 1) |
4501 extract32(insn, 15, 1);
4502 bool sf = extract32(insn, 31, 1);
4503 bool is_sub = extract32(op_id, 0, 1);
4504 bool is_high = extract32(op_id, 2, 1);
4505 bool is_signed = false;
4506 TCGv_i64 tcg_op1;
4507 TCGv_i64 tcg_op2;
4508 TCGv_i64 tcg_tmp;
4509
4510 /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
4511 switch (op_id) {
4512 case 0x42: /* SMADDL */
4513 case 0x43: /* SMSUBL */
4514 case 0x44: /* SMULH */
4515 is_signed = true;
4516 break;
4517 case 0x0: /* MADD (32bit) */
4518 case 0x1: /* MSUB (32bit) */
4519 case 0x40: /* MADD (64bit) */
4520 case 0x41: /* MSUB (64bit) */
4521 case 0x4a: /* UMADDL */
4522 case 0x4b: /* UMSUBL */
4523 case 0x4c: /* UMULH */
4524 break;
4525 default:
4526 unallocated_encoding(s);
4527 return;
4528 }
4529
4530 if (is_high) {
4531 TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
4532 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4533 TCGv_i64 tcg_rn = cpu_reg(s, rn);
4534 TCGv_i64 tcg_rm = cpu_reg(s, rm);
4535
4536 if (is_signed) {
4537 tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4538 } else {
4539 tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4540 }
4541
4542 tcg_temp_free_i64(low_bits);
4543 return;
4544 }
4545
4546 tcg_op1 = tcg_temp_new_i64();
4547 tcg_op2 = tcg_temp_new_i64();
4548 tcg_tmp = tcg_temp_new_i64();
4549
4550 if (op_id < 0x42) {
4551 tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
4552 tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
4553 } else {
4554 if (is_signed) {
4555 tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
4556 tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
4557 } else {
4558 tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
4559 tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
4560 }
4561 }
4562
4563 if (ra == 31 && !is_sub) {
4564 /* Special-case MADD with rA == XZR; it is the standard MUL alias */
4565 tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
4566 } else {
4567 tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
4568 if (is_sub) {
4569 tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4570 } else {
4571 tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4572 }
4573 }
4574
4575 if (!sf) {
4576 tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
4577 }
4578
4579 tcg_temp_free_i64(tcg_op1);
4580 tcg_temp_free_i64(tcg_op2);
4581 tcg_temp_free_i64(tcg_tmp);
4582 }
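/* Example of the op_id packing above (for illustration): SMADDL has
 * sf:op54 = 0b100, op31 = 0b001 and o0 = 0, giving
 * op_id = (0b100 << 4) | (0b001 << 1) | 0 = 0x42. That is why the
 * 64-bit widening cases start at 0x40 and why op_id < 0x42 selects
 * the non-widening MADD/MSUB operand setup.
 */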
4583
4584 /* Add/subtract (with carry)
4585 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
4586 * +--+--+--+------------------------+------+-------------+------+-----+
4587 * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | 0 0 0 0 0 0 | Rn | Rd |
4588 * +--+--+--+------------------------+------+-------------+------+-----+
4589 */
4590
4591 static void disas_adc_sbc(DisasContext *s, uint32_t insn)
4592 {
4593 unsigned int sf, op, setflags, rm, rn, rd;
4594 TCGv_i64 tcg_y, tcg_rn, tcg_rd;
4595
4596 sf = extract32(insn, 31, 1);
4597 op = extract32(insn, 30, 1);
4598 setflags = extract32(insn, 29, 1);
4599 rm = extract32(insn, 16, 5);
4600 rn = extract32(insn, 5, 5);
4601 rd = extract32(insn, 0, 5);
4602
4603 tcg_rd = cpu_reg(s, rd);
4604 tcg_rn = cpu_reg(s, rn);
4605
4606 if (op) {
4607 tcg_y = new_tmp_a64(s);
4608 tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
4609 } else {
4610 tcg_y = cpu_reg(s, rm);
4611 }
4612
4613 if (setflags) {
4614 gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
4615 } else {
4616 gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
4617 }
4618 }
4619
4620 /*
4621 * Rotate right into flags
4622 * 31 30 29 21 15 10 5 4 0
4623 * +--+--+--+-----------------+--------+-----------+------+--+------+
4624 * |sf|op| S| 1 1 0 1 0 0 0 0 | imm6 | 0 0 0 0 1 | Rn |o2| mask |
4625 * +--+--+--+-----------------+--------+-----------+------+--+------+
4626 */
4627 static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
4628 {
4629 int mask = extract32(insn, 0, 4);
4630 int o2 = extract32(insn, 4, 1);
4631 int rn = extract32(insn, 5, 5);
4632 int imm6 = extract32(insn, 15, 6);
4633 int sf_op_s = extract32(insn, 29, 3);
4634 TCGv_i64 tcg_rn;
4635 TCGv_i32 nzcv;
4636
4637 if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
4638 unallocated_encoding(s);
4639 return;
4640 }
4641
4642 tcg_rn = read_cpu_reg(s, rn, 1);
4643 tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);
4644
4645 nzcv = tcg_temp_new_i32();
4646 tcg_gen_extrl_i64_i32(nzcv, tcg_rn);
4647
4648 if (mask & 8) { /* N */
4649 tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
4650 }
4651 if (mask & 4) { /* Z */
4652 tcg_gen_not_i32(cpu_ZF, nzcv);
4653 tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
4654 }
4655 if (mask & 2) { /* C */
4656 tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
4657 }
4658 if (mask & 1) { /* V */
4659 tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
4660 }
4661
4662 tcg_temp_free_i32(nzcv);
4663 }
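/* Example (hypothetical operands): RMIF x1, #60, #0b0010 rotates x1
 * right by 60, so bit 1 of the rotated value is original bit 61; with
 * mask = 2 only the C case above runs, copying that single bit into
 * cpu_CF and leaving N, Z and V untouched.
 */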
4664
4665 /*
4666 * Evaluate into flags
4667 * 31 30 29 21 15 14 10 5 4 0
4668 * +--+--+--+-----------------+---------+----+---------+------+--+------+
4669 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 | Rn |o3| mask |
4670 * +--+--+--+-----------------+---------+----+---------+------+--+------+
4671 */
4672 static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
4673 {
4674 int o3_mask = extract32(insn, 0, 5);
4675 int rn = extract32(insn, 5, 5);
4676 int o2 = extract32(insn, 15, 6);
4677 int sz = extract32(insn, 14, 1);
4678 int sf_op_s = extract32(insn, 29, 3);
4679 TCGv_i32 tmp;
4680 int shift;
4681
4682 if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
4683 !dc_isar_feature(aa64_condm_4, s)) {
4684 unallocated_encoding(s);
4685 return;
4686 }
4687 shift = sz ? 16 : 24; /* SETF16 or SETF8 */
4688
4689 tmp = tcg_temp_new_i32();
4690 tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
4691 tcg_gen_shli_i32(cpu_NF, tmp, shift);
4692 tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
4693 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
4694 tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
4695 tcg_temp_free_i32(tmp);
4696 }
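/* Example (illustrative): for SETF8 w0, shift = 24, so cpu_NF holds
 * bit 7 of w0 in the sign position (N), cpu_ZF == 0 exactly when the
 * low byte is zero (Z), and the shift/xor pair leaves V equal to
 * bit 8 xor bit 7, set exactly when the value overflows a signed
 * 8-bit range.
 */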
4697
4698 /* Conditional compare (immediate / register)
4699 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
4700 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4701 * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
4702 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4703 * [1] y [0] [0]
4704 */
4705 static void disas_cc(DisasContext *s, uint32_t insn)
4706 {
4707 unsigned int sf, op, y, cond, rn, nzcv, is_imm;
4708 TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
4709 TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
4710 DisasCompare c;
4711
4712 if (!extract32(insn, 29, 1)) {
4713 unallocated_encoding(s);
4714 return;
4715 }
4716 if (insn & (1 << 10 | 1 << 4)) {
4717 unallocated_encoding(s);
4718 return;
4719 }
4720 sf = extract32(insn, 31, 1);
4721 op = extract32(insn, 30, 1);
4722 is_imm = extract32(insn, 11, 1);
4723 y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
4724 cond = extract32(insn, 12, 4);
4725 rn = extract32(insn, 5, 5);
4726 nzcv = extract32(insn, 0, 4);
4727
4728 /* Set T0 = !COND. */
4729 tcg_t0 = tcg_temp_new_i32();
4730 arm_test_cc(&c, cond);
4731 tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
4732 arm_free_cc(&c);
4733
4734 /* Load the arguments for the new comparison. */
4735 if (is_imm) {
4736 tcg_y = new_tmp_a64(s);
4737 tcg_gen_movi_i64(tcg_y, y);
4738 } else {
4739 tcg_y = cpu_reg(s, y);
4740 }
4741 tcg_rn = cpu_reg(s, rn);
4742
4743 /* Set the flags for the new comparison. */
4744 tcg_tmp = tcg_temp_new_i64();
4745 if (op) {
4746 gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
4747 } else {
4748 gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
4749 }
4750 tcg_temp_free_i64(tcg_tmp);
4751
4752 /* If COND was false, force the flags to #nzcv. Compute two masks
4753 * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
4754 * For tcg hosts that support ANDC, we can make do with just T1.
4755 * In either case, allow the tcg optimizer to delete any unused mask.
4756 */
4757 tcg_t1 = tcg_temp_new_i32();
4758 tcg_t2 = tcg_temp_new_i32();
4759 tcg_gen_neg_i32(tcg_t1, tcg_t0);
4760 tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
4761
4762 if (nzcv & 8) { /* N */
4763 tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
4764 } else {
4765 if (TCG_TARGET_HAS_andc_i32) {
4766 tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
4767 } else {
4768 tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
4769 }
4770 }
4771 if (nzcv & 4) { /* Z */
4772 if (TCG_TARGET_HAS_andc_i32) {
4773 tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
4774 } else {
4775 tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
4776 }
4777 } else {
4778 tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
4779 }
4780 if (nzcv & 2) { /* C */
4781 tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
4782 } else {
4783 if (TCG_TARGET_HAS_andc_i32) {
4784 tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
4785 } else {
4786 tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
4787 }
4788 }
4789 if (nzcv & 1) { /* V */
4790 tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
4791 } else {
4792 if (TCG_TARGET_HAS_andc_i32) {
4793 tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
4794 } else {
4795 tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
4796 }
4797 }
4798 tcg_temp_free_i32(tcg_t0);
4799 tcg_temp_free_i32(tcg_t1);
4800 tcg_temp_free_i32(tcg_t2);
4801 }
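/* Example of the mask trick above (values illustrative): if COND is
 * false then T0 = 1, T1 = -1 and T2 = 0. Requesting N set or-s cpu_NF
 * with T1, forcing the sign bit on; requesting N clear and-s with T2
 * (or andc with T1), zeroing it. Z uses the opposite polarity because
 * cpu_ZF means "Z set" when it is zero. If COND is true, T0 = T1 = 0
 * and T2 = -1, so every or/and/andc is an identity and the freshly
 * computed flags survive.
 */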
4802
4803 /* Conditional select
4804 * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
4805 * +----+----+---+-----------------+------+------+-----+------+------+
4806 * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
4807 * +----+----+---+-----------------+------+------+-----+------+------+
4808 */
4809 static void disas_cond_select(DisasContext *s, uint32_t insn)
4810 {
4811 unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
4812 TCGv_i64 tcg_rd, zero;
4813 DisasCompare64 c;
4814
4815 if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
4816 /* S == 1 or op2<1> == 1 */
4817 unallocated_encoding(s);
4818 return;
4819 }
4820 sf = extract32(insn, 31, 1);
4821 else_inv = extract32(insn, 30, 1);
4822 rm = extract32(insn, 16, 5);
4823 cond = extract32(insn, 12, 4);
4824 else_inc = extract32(insn, 10, 1);
4825 rn = extract32(insn, 5, 5);
4826 rd = extract32(insn, 0, 5);
4827
4828 tcg_rd = cpu_reg(s, rd);
4829
4830 a64_test_cc(&c, cond);
4831 zero = tcg_const_i64(0);
4832
4833 if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
4834 /* CSET & CSETM. */
4835 tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
4836 if (else_inv) {
4837 tcg_gen_neg_i64(tcg_rd, tcg_rd);
4838 }
4839 } else {
4840 TCGv_i64 t_true = cpu_reg(s, rn);
4841 TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
4842 if (else_inv && else_inc) {
4843 tcg_gen_neg_i64(t_false, t_false);
4844 } else if (else_inv) {
4845 tcg_gen_not_i64(t_false, t_false);
4846 } else if (else_inc) {
4847 tcg_gen_addi_i64(t_false, t_false, 1);
4848 }
4849 tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
4850 }
4851
4852 tcg_temp_free_i64(zero);
4853 a64_free_cc(&c);
4854
4855 if (!sf) {
4856 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4857 }
4858 }
4859
4860 static void handle_clz(DisasContext *s, unsigned int sf,
4861 unsigned int rn, unsigned int rd)
4862 {
4863 TCGv_i64 tcg_rd, tcg_rn;
4864 tcg_rd = cpu_reg(s, rd);
4865 tcg_rn = cpu_reg(s, rn);
4866
4867 if (sf) {
4868 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
4869 } else {
4870 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4871 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4872 tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
4873 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4874 tcg_temp_free_i32(tcg_tmp32);
4875 }
4876 }
4877
4878 static void handle_cls(DisasContext *s, unsigned int sf,
4879 unsigned int rn, unsigned int rd)
4880 {
4881 TCGv_i64 tcg_rd, tcg_rn;
4882 tcg_rd = cpu_reg(s, rd);
4883 tcg_rn = cpu_reg(s, rn);
4884
4885 if (sf) {
4886 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
4887 } else {
4888 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4889 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4890 tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
4891 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4892 tcg_temp_free_i32(tcg_tmp32);
4893 }
4894 }
4895
4896 static void handle_rbit(DisasContext *s, unsigned int sf,
4897 unsigned int rn, unsigned int rd)
4898 {
4899 TCGv_i64 tcg_rd, tcg_rn;
4900 tcg_rd = cpu_reg(s, rd);
4901 tcg_rn = cpu_reg(s, rn);
4902
4903 if (sf) {
4904 gen_helper_rbit64(tcg_rd, tcg_rn);
4905 } else {
4906 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4907 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4908 gen_helper_rbit(tcg_tmp32, tcg_tmp32);
4909 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4910 tcg_temp_free_i32(tcg_tmp32);
4911 }
4912 }
4913
4914 /* REV with sf==1, opcode==3 ("REV64") */
4915 static void handle_rev64(DisasContext *s, unsigned int sf,
4916 unsigned int rn, unsigned int rd)
4917 {
4918 if (!sf) {
4919 unallocated_encoding(s);
4920 return;
4921 }
4922 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
4923 }
4924
4925 /* REV (sf==0, opcode==2): byte-reverse a 32-bit word
4926 * REV32 (sf==1, opcode==2): byte-reverse each 32-bit half of Xn
4927 */
4928 static void handle_rev32(DisasContext *s, unsigned int sf,
4929 unsigned int rn, unsigned int rd)
4930 {
4931 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4932
4933 if (sf) {
4934 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4935 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4936
4937 /* bswap32_i64 requires zero high word */
4938 tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
4939 tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
4940 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
4941 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
4942 tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
4943
4944 tcg_temp_free_i64(tcg_tmp);
4945 } else {
4946 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
4947 tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
4948 }
4949 }
4950
4951 /* REV16 (opcode==1) */
4952 static void handle_rev16(DisasContext *s, unsigned int sf,
4953 unsigned int rn, unsigned int rd)
4954 {
4955 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4956 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4957 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4958 TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
4959
4960 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
4961 tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
4962 tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
4963 tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
4964 tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
4965
4966 tcg_temp_free_i64(mask);
4967 tcg_temp_free_i64(tcg_tmp);
4968 }
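/* Worked example (illustrative): with sf == 0 and Wn = 0x11223344,
 * tcg_rd = Wn & 0x00ff00ff = 0x00220044 and
 * tcg_tmp = (Wn >> 8) & 0x00ff00ff = 0x00110033; shifting rd left by
 * 8 and or-ing gives 0x22114433, the bytes of each halfword swapped.
 */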
4969
4970 /* Data-processing (1 source)
4971 * 31 30 29 28 21 20 16 15 10 9 5 4 0
4972 * +----+---+---+-----------------+---------+--------+------+------+
4973 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
4974 * +----+---+---+-----------------+---------+--------+------+------+
4975 */
4976 static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
4977 {
4978 unsigned int sf, opcode, opcode2, rn, rd;
4979 TCGv_i64 tcg_rd;
4980
4981 if (extract32(insn, 29, 1)) {
4982 unallocated_encoding(s);
4983 return;
4984 }
4985
4986 sf = extract32(insn, 31, 1);
4987 opcode = extract32(insn, 10, 6);
4988 opcode2 = extract32(insn, 16, 5);
4989 rn = extract32(insn, 5, 5);
4990 rd = extract32(insn, 0, 5);
4991
4992 #define MAP(SF, O2, O1) ((SF) | ((O1) << 1) | ((O2) << 7))
4993
4994 switch (MAP(sf, opcode2, opcode)) {
4995 case MAP(0, 0x00, 0x00): /* RBIT */
4996 case MAP(1, 0x00, 0x00):
4997 handle_rbit(s, sf, rn, rd);
4998 break;
4999 case MAP(0, 0x00, 0x01): /* REV16 */
5000 case MAP(1, 0x00, 0x01):
5001 handle_rev16(s, sf, rn, rd);
5002 break;
5003 case MAP(0, 0x00, 0x02): /* REV/REV32 */
5004 case MAP(1, 0x00, 0x02):
5005 handle_rev32(s, sf, rn, rd);
5006 break;
5007 case MAP(1, 0x00, 0x03): /* REV64 */
5008 handle_rev64(s, sf, rn, rd);
5009 break;
5010 case MAP(0, 0x00, 0x04): /* CLZ */
5011 case MAP(1, 0x00, 0x04):
5012 handle_clz(s, sf, rn, rd);
5013 break;
5014 case MAP(0, 0x00, 0x05): /* CLS */
5015 case MAP(1, 0x00, 0x05):
5016 handle_cls(s, sf, rn, rd);
5017 break;
5018 case MAP(1, 0x01, 0x00): /* PACIA */
5019 if (s->pauth_active) {
5020 tcg_rd = cpu_reg(s, rd);
5021 gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5022 } else if (!dc_isar_feature(aa64_pauth, s)) {
5023 goto do_unallocated;
5024 }
5025 break;
5026 case MAP(1, 0x01, 0x01): /* PACIB */
5027 if (s->pauth_active) {
5028 tcg_rd = cpu_reg(s, rd);
5029 gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5030 } else if (!dc_isar_feature(aa64_pauth, s)) {
5031 goto do_unallocated;
5032 }
5033 break;
5034 case MAP(1, 0x01, 0x02): /* PACDA */
5035 if (s->pauth_active) {
5036 tcg_rd = cpu_reg(s, rd);
5037 gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5038 } else if (!dc_isar_feature(aa64_pauth, s)) {
5039 goto do_unallocated;
5040 }
5041 break;
5042 case MAP(1, 0x01, 0x03): /* PACDB */
5043 if (s->pauth_active) {
5044 tcg_rd = cpu_reg(s, rd);
5045 gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5046 } else if (!dc_isar_feature(aa64_pauth, s)) {
5047 goto do_unallocated;
5048 }
5049 break;
5050 case MAP(1, 0x01, 0x04): /* AUTIA */
5051 if (s->pauth_active) {
5052 tcg_rd = cpu_reg(s, rd);
5053 gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5054 } else if (!dc_isar_feature(aa64_pauth, s)) {
5055 goto do_unallocated;
5056 }
5057 break;
5058 case MAP(1, 0x01, 0x05): /* AUTIB */
5059 if (s->pauth_active) {
5060 tcg_rd = cpu_reg(s, rd);
5061 gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5062 } else if (!dc_isar_feature(aa64_pauth, s)) {
5063 goto do_unallocated;
5064 }
5065 break;
5066 case MAP(1, 0x01, 0x06): /* AUTDA */
5067 if (s->pauth_active) {
5068 tcg_rd = cpu_reg(s, rd);
5069 gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5070 } else if (!dc_isar_feature(aa64_pauth, s)) {
5071 goto do_unallocated;
5072 }
5073 break;
5074 case MAP(1, 0x01, 0x07): /* AUTDB */
5075 if (s->pauth_active) {
5076 tcg_rd = cpu_reg(s, rd);
5077 gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5078 } else if (!dc_isar_feature(aa64_pauth, s)) {
5079 goto do_unallocated;
5080 }
5081 break;
5082 case MAP(1, 0x01, 0x08): /* PACIZA */
5083 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5084 goto do_unallocated;
5085 } else if (s->pauth_active) {
5086 tcg_rd = cpu_reg(s, rd);
5087 gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5088 }
5089 break;
5090 case MAP(1, 0x01, 0x09): /* PACIZB */
5091 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5092 goto do_unallocated;
5093 } else if (s->pauth_active) {
5094 tcg_rd = cpu_reg(s, rd);
5095 gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5096 }
5097 break;
5098 case MAP(1, 0x01, 0x0a): /* PACDZA */
5099 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5100 goto do_unallocated;
5101 } else if (s->pauth_active) {
5102 tcg_rd = cpu_reg(s, rd);
5103 gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5104 }
5105 break;
5106 case MAP(1, 0x01, 0x0b): /* PACDZB */
5107 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5108 goto do_unallocated;
5109 } else if (s->pauth_active) {
5110 tcg_rd = cpu_reg(s, rd);
5111 gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5112 }
5113 break;
5114 case MAP(1, 0x01, 0x0c): /* AUTIZA */
5115 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5116 goto do_unallocated;
5117 } else if (s->pauth_active) {
5118 tcg_rd = cpu_reg(s, rd);
5119 gen_helper_autia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5120 }
5121 break;
5122 case MAP(1, 0x01, 0x0d): /* AUTIZB */
5123 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5124 goto do_unallocated;
5125 } else if (s->pauth_active) {
5126 tcg_rd = cpu_reg(s, rd);
5127 gen_helper_autib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5128 }
5129 break;
5130 case MAP(1, 0x01, 0x0e): /* AUTDZA */
5131 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5132 goto do_unallocated;
5133 } else if (s->pauth_active) {
5134 tcg_rd = cpu_reg(s, rd);
5135 gen_helper_autda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5136 }
5137 break;
5138 case MAP(1, 0x01, 0x0f): /* AUTDZB */
5139 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5140 goto do_unallocated;
5141 } else if (s->pauth_active) {
5142 tcg_rd = cpu_reg(s, rd);
5143 gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5144 }
5145 break;
5146 case MAP(1, 0x01, 0x10): /* XPACI */
5147 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5148 goto do_unallocated;
5149 } else if (s->pauth_active) {
5150 tcg_rd = cpu_reg(s, rd);
5151 gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
5152 }
5153 break;
5154 case MAP(1, 0x01, 0x11): /* XPACD */
5155 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5156 goto do_unallocated;
5157 } else if (s->pauth_active) {
5158 tcg_rd = cpu_reg(s, rd);
5159 gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
5160 }
5161 break;
5162 default:
5163 do_unallocated:
5164 unallocated_encoding(s);
5165 break;
5166 }
5167
5168 #undef MAP
5169 }
5170
5171 static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
5172 unsigned int rm, unsigned int rn, unsigned int rd)
5173 {
5174 TCGv_i64 tcg_n, tcg_m, tcg_rd;
5175 tcg_rd = cpu_reg(s, rd);
5176
5177 if (!sf && is_signed) {
5178 tcg_n = new_tmp_a64(s);
5179 tcg_m = new_tmp_a64(s);
5180 tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
5181 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
5182 } else {
5183 tcg_n = read_cpu_reg(s, rn, sf);
5184 tcg_m = read_cpu_reg(s, rm, sf);
5185 }
5186
5187 if (is_signed) {
5188 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
5189 } else {
5190 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
5191 }
5192
5193 if (!sf) { /* zero extend final result */
5194 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5195 }
5196 }
5197
5198 /* LSLV, LSRV, ASRV, RORV */
5199 static void handle_shift_reg(DisasContext *s,
5200 enum a64_shift_type shift_type, unsigned int sf,
5201 unsigned int rm, unsigned int rn, unsigned int rd)
5202 {
5203 TCGv_i64 tcg_shift = tcg_temp_new_i64();
5204 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5205 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5206
5207 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
5208 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
5209 tcg_temp_free_i64(tcg_shift);
5210 }
5211
5212 /* CRC32[BHWX], CRC32C[BHWX] */
5213 static void handle_crc32(DisasContext *s,
5214 unsigned int sf, unsigned int sz, bool crc32c,
5215 unsigned int rm, unsigned int rn, unsigned int rd)
5216 {
5217 TCGv_i64 tcg_acc, tcg_val;
5218 TCGv_i32 tcg_bytes;
5219
5220 if (!dc_isar_feature(aa64_crc32, s)
5221 || (sf == 1 && sz != 3)
5222 || (sf == 0 && sz == 3)) {
5223 unallocated_encoding(s);
5224 return;
5225 }
5226
5227 if (sz == 3) {
5228 tcg_val = cpu_reg(s, rm);
5229 } else {
5230 uint64_t mask;
5231 switch (sz) {
5232 case 0:
5233 mask = 0xFF;
5234 break;
5235 case 1:
5236 mask = 0xFFFF;
5237 break;
5238 case 2:
5239 mask = 0xFFFFFFFF;
5240 break;
5241 default:
5242 g_assert_not_reached();
5243 }
5244 tcg_val = new_tmp_a64(s);
5245 tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
5246 }
5247
5248 tcg_acc = cpu_reg(s, rn);
5249 tcg_bytes = tcg_const_i32(1 << sz);
5250
5251 if (crc32c) {
5252 gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
5253 } else {
5254 gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
5255 }
5256
5257 tcg_temp_free_i32(tcg_bytes);
5258 }
5259
5260 /* Data-processing (2 source)
5261 * 31 30 29 28 21 20 16 15 10 9 5 4 0
5262 * +----+---+---+-----------------+------+--------+------+------+
5263 * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
5264 * +----+---+---+-----------------+------+--------+------+------+
5265 */
5266 static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
5267 {
5268 unsigned int sf, rm, opcode, rn, rd;
5269 sf = extract32(insn, 31, 1);
5270 rm = extract32(insn, 16, 5);
5271 opcode = extract32(insn, 10, 6);
5272 rn = extract32(insn, 5, 5);
5273 rd = extract32(insn, 0, 5);
5274
5275 if (extract32(insn, 29, 1)) {
5276 unallocated_encoding(s);
5277 return;
5278 }
5279
5280 switch (opcode) {
5281 case 2: /* UDIV */
5282 handle_div(s, false, sf, rm, rn, rd);
5283 break;
5284 case 3: /* SDIV */
5285 handle_div(s, true, sf, rm, rn, rd);
5286 break;
5287 case 8: /* LSLV */
5288 handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
5289 break;
5290 case 9: /* LSRV */
5291 handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
5292 break;
5293 case 10: /* ASRV */
5294 handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
5295 break;
5296 case 11: /* RORV */
5297 handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
5298 break;
5299 case 12: /* PACGA */
5300 if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
5301 goto do_unallocated;
5302 }
5303 gen_helper_pacga(cpu_reg(s, rd), cpu_env,
5304 cpu_reg(s, rn), cpu_reg_sp(s, rm));
5305 break;
5306 case 16:
5307 case 17:
5308 case 18:
5309 case 19:
5310 case 20:
5311 case 21:
5312 case 22:
5313 case 23: /* CRC32 */
5314 {
5315 int sz = extract32(opcode, 0, 2);
5316 bool crc32c = extract32(opcode, 2, 1);
5317 handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
5318 break;
5319 }
5320 default:
5321 do_unallocated:
5322 unallocated_encoding(s);
5323 break;
5324 }
5325 }
5326
5327 /*
5328 * Data processing - register
5329 * 31 30 29 28 25 21 20 16 10 0
5330 * +--+---+--+---+-------+-----+-------+-------+---------+
5331 * | |op0| |op1| 1 0 1 | op2 | | op3 | |
5332 * +--+---+--+---+-------+-----+-------+-------+---------+
5333 */
5334 static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
5335 {
5336 int op0 = extract32(insn, 30, 1);
5337 int op1 = extract32(insn, 28, 1);
5338 int op2 = extract32(insn, 21, 4);
5339 int op3 = extract32(insn, 10, 6);
5340
5341 if (!op1) {
5342 if (op2 & 8) {
5343 if (op2 & 1) {
5344 /* Add/sub (extended register) */
5345 disas_add_sub_ext_reg(s, insn);
5346 } else {
5347 /* Add/sub (shifted register) */
5348 disas_add_sub_reg(s, insn);
5349 }
5350 } else {
5351 /* Logical (shifted register) */
5352 disas_logic_reg(s, insn);
5353 }
5354 return;
5355 }
5356
5357 switch (op2) {
5358 case 0x0:
5359 switch (op3) {
5360 case 0x00: /* Add/subtract (with carry) */
5361 disas_adc_sbc(s, insn);
5362 break;
5363
5364 case 0x01: /* Rotate right into flags */
5365 case 0x21:
5366 disas_rotate_right_into_flags(s, insn);
5367 break;
5368
5369 case 0x02: /* Evaluate into flags */
5370 case 0x12:
5371 case 0x22:
5372 case 0x32:
5373 disas_evaluate_into_flags(s, insn);
5374 break;
5375
5376 default:
5377 goto do_unallocated;
5378 }
5379 break;
5380
5381 case 0x2: /* Conditional compare */
5382 disas_cc(s, insn); /* both imm and reg forms */
5383 break;
5384
5385 case 0x4: /* Conditional select */
5386 disas_cond_select(s, insn);
5387 break;
5388
5389 case 0x6: /* Data-processing */
5390 if (op0) { /* (1 source) */
5391 disas_data_proc_1src(s, insn);
5392 } else { /* (2 source) */
5393 disas_data_proc_2src(s, insn);
5394 }
5395 break;
5396 case 0x8 ... 0xf: /* (3 source) */
5397 disas_data_proc_3src(s, insn);
5398 break;
5399
5400 default:
5401 do_unallocated:
5402 unallocated_encoding(s);
5403 break;
5404 }
5405 }
5406
5407 static void handle_fp_compare(DisasContext *s, int size,
5408 unsigned int rn, unsigned int rm,
5409 bool cmp_with_zero, bool signal_all_nans)
5410 {
5411 TCGv_i64 tcg_flags = tcg_temp_new_i64();
5412 TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);
5413
5414 if (size == MO_64) {
5415 TCGv_i64 tcg_vn, tcg_vm;
5416
5417 tcg_vn = read_fp_dreg(s, rn);
5418 if (cmp_with_zero) {
5419 tcg_vm = tcg_const_i64(0);
5420 } else {
5421 tcg_vm = read_fp_dreg(s, rm);
5422 }
5423 if (signal_all_nans) {
5424 gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5425 } else {
5426 gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5427 }
5428 tcg_temp_free_i64(tcg_vn);
5429 tcg_temp_free_i64(tcg_vm);
5430 } else {
5431 TCGv_i32 tcg_vn = tcg_temp_new_i32();
5432 TCGv_i32 tcg_vm = tcg_temp_new_i32();
5433
5434 read_vec_element_i32(s, tcg_vn, rn, 0, size);
5435 if (cmp_with_zero) {
5436 tcg_gen_movi_i32(tcg_vm, 0);
5437 } else {
5438 read_vec_element_i32(s, tcg_vm, rm, 0, size);
5439 }
5440
5441 switch (size) {
5442 case MO_32:
5443 if (signal_all_nans) {
5444 gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5445 } else {
5446 gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5447 }
5448 break;
5449 case MO_16:
5450 if (signal_all_nans) {
5451 gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5452 } else {
5453 gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5454 }
5455 break;
5456 default:
5457 g_assert_not_reached();
5458 }
5459
5460 tcg_temp_free_i32(tcg_vn);
5461 tcg_temp_free_i32(tcg_vm);
5462 }
5463
5464 tcg_temp_free_ptr(fpst);
5465
5466 gen_set_nzcv(tcg_flags);
5467
5468 tcg_temp_free_i64(tcg_flags);
5469 }
5470
5471 /* Floating point compare
5472 * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
5473 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
5474 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
5475 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
5476 */
5477 static void disas_fp_compare(DisasContext *s, uint32_t insn)
5478 {
5479 unsigned int mos, type, rm, op, rn, opc, op2r;
5480 int size;
5481
5482 mos = extract32(insn, 29, 3);
5483 type = extract32(insn, 22, 2);
5484 rm = extract32(insn, 16, 5);
5485 op = extract32(insn, 14, 2);
5486 rn = extract32(insn, 5, 5);
5487 opc = extract32(insn, 3, 2);
5488 op2r = extract32(insn, 0, 3);
5489
5490 if (mos || op || op2r) {
5491 unallocated_encoding(s);
5492 return;
5493 }
5494
5495 switch (type) {
5496 case 0:
5497 size = MO_32;
5498 break;
5499 case 1:
5500 size = MO_64;
5501 break;
5502 case 3:
5503 size = MO_16;
5504 if (dc_isar_feature(aa64_fp16, s)) {
5505 break;
5506 }
5507 /* fallthru */
5508 default:
5509 unallocated_encoding(s);
5510 return;
5511 }
5512
5513 if (!fp_access_check(s)) {
5514 return;
5515 }
5516
5517 handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
5518 }
5519
5520 /* Floating point conditional compare
5521 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
5522 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
5523 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
5524 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
5525 */
5526 static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
5527 {
5528 unsigned int mos, type, rm, cond, rn, op, nzcv;
5529 TCGv_i64 tcg_flags;
5530 TCGLabel *label_continue = NULL;
5531 int size;
5532
5533 mos = extract32(insn, 29, 3);
5534 type = extract32(insn, 22, 2);
5535 rm = extract32(insn, 16, 5);
5536 cond = extract32(insn, 12, 4);
5537 rn = extract32(insn, 5, 5);
5538 op = extract32(insn, 4, 1);
5539 nzcv = extract32(insn, 0, 4);
5540
5541 if (mos) {
5542 unallocated_encoding(s);
5543 return;
5544 }
5545
5546 switch (type) {
5547 case 0:
5548 size = MO_32;
5549 break;
5550 case 1:
5551 size = MO_64;
5552 break;
5553 case 3:
5554 size = MO_16;
5555 if (dc_isar_feature(aa64_fp16, s)) {
5556 break;
5557 }
5558 /* fallthru */
5559 default:
5560 unallocated_encoding(s);
5561 return;
5562 }
5563
5564 if (!fp_access_check(s)) {
5565 return;
5566 }
5567
5568 if (cond < 0x0e) { /* not always */
5569 TCGLabel *label_match = gen_new_label();
5570 label_continue = gen_new_label();
5571 arm_gen_test_cc(cond, label_match);
5572 /* nomatch: */
5573 tcg_flags = tcg_const_i64(nzcv << 28);
5574 gen_set_nzcv(tcg_flags);
5575 tcg_temp_free_i64(tcg_flags);
5576 tcg_gen_br(label_continue);
5577 gen_set_label(label_match);
5578 }
5579
5580 handle_fp_compare(s, size, rn, rm, false, op);
5581
5582 if (cond < 0x0e) {
5583 gen_set_label(label_continue);
5584 }
5585 }
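/* Example (illustrative): FCCMP s0, s1, #0b0011, eq with Z clear
 * takes the "nomatch" path above, loading NZCV = 0x3 << 28 (C and V
 * set) without comparing; with Z set, the branch to label_match runs
 * the real compare of s0 and s1 and its flags are kept instead.
 */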
5586
5587 /* Floating point conditional select
5588 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
5589 * +---+---+---+-----------+------+---+------+------+-----+------+------+
5590 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
5591 * +---+---+---+-----------+------+---+------+------+-----+------+------+
5592 */
5593 static void disas_fp_csel(DisasContext *s, uint32_t insn)
5594 {
5595 unsigned int mos, type, rm, cond, rn, rd;
5596 TCGv_i64 t_true, t_false, t_zero;
5597 DisasCompare64 c;
5598 MemOp sz;
5599
5600 mos = extract32(insn, 29, 3);
5601 type = extract32(insn, 22, 2);
5602 rm = extract32(insn, 16, 5);
5603 cond = extract32(insn, 12, 4);
5604 rn = extract32(insn, 5, 5);
5605 rd = extract32(insn, 0, 5);
5606
5607 if (mos) {
5608 unallocated_encoding(s);
5609 return;
5610 }
5611
5612 switch (type) {
5613 case 0:
5614 sz = MO_32;
5615 break;
5616 case 1:
5617 sz = MO_64;
5618 break;
5619 case 3:
5620 sz = MO_16;
5621 if (dc_isar_feature(aa64_fp16, s)) {
5622 break;
5623 }
5624 /* fallthru */
5625 default:
5626 unallocated_encoding(s);
5627 return;
5628 }
5629
5630 if (!fp_access_check(s)) {
5631 return;
5632 }
5633
5634 /* Zero extend sreg & hreg inputs to 64 bits now. */
5635 t_true = tcg_temp_new_i64();
5636 t_false = tcg_temp_new_i64();
5637 read_vec_element(s, t_true, rn, 0, sz);
5638 read_vec_element(s, t_false, rm, 0, sz);
5639
5640 a64_test_cc(&c, cond);
5641 t_zero = tcg_const_i64(0);
5642 tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
5643 tcg_temp_free_i64(t_zero);
5644 tcg_temp_free_i64(t_false);
5645 a64_free_cc(&c);
5646
5647 /* Note that sregs & hregs write back zeros to the high bits,
5648 and we've already done the zero-extension. */
5649 write_fp_dreg(s, rd, t_true);
5650 tcg_temp_free_i64(t_true);
5651 }
5652
5653 /* Floating-point data-processing (1 source) - half precision */
5654 static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
5655 {
5656 TCGv_ptr fpst = NULL;
5657 TCGv_i32 tcg_op = read_fp_hreg(s, rn);
5658 TCGv_i32 tcg_res = tcg_temp_new_i32();
5659
5660 switch (opcode) {
5661 case 0x0: /* FMOV */
5662 tcg_gen_mov_i32(tcg_res, tcg_op);
5663 break;
5664 case 0x1: /* FABS */
5665 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
5666 break;
5667 case 0x2: /* FNEG */
5668 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
5669 break;
5670 case 0x3: /* FSQRT */
5671 fpst = get_fpstatus_ptr(true);
5672 gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
5673 break;
5674 case 0x8: /* FRINTN */
5675 case 0x9: /* FRINTP */
5676 case 0xa: /* FRINTM */
5677 case 0xb: /* FRINTZ */
5678 case 0xc: /* FRINTA */
5679 {
5680 TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
5681 fpst = get_fpstatus_ptr(true);
5682
5683 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5684 gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
5685
5686 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5687 tcg_temp_free_i32(tcg_rmode);
5688 break;
5689 }
5690 case 0xe: /* FRINTX */
5691 fpst = get_fpstatus_ptr(true);
5692 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
5693 break;
5694 case 0xf: /* FRINTI */
5695 fpst = get_fpstatus_ptr(true);
5696 gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
5697 break;
5698 default:
5699 abort();
5700 }
5701
5702 write_fp_sreg(s, rd, tcg_res);
5703
5704 if (fpst) {
5705 tcg_temp_free_ptr(fpst);
5706 }
5707 tcg_temp_free_i32(tcg_op);
5708 tcg_temp_free_i32(tcg_res);
5709 }
5710
5711 /* Floating-point data-processing (1 source) - single precision */
5712 static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
5713 {
5714 void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
5715 TCGv_i32 tcg_op, tcg_res;
5716 TCGv_ptr fpst;
5717 int rmode = -1;
5718
5719 tcg_op = read_fp_sreg(s, rn);
5720 tcg_res = tcg_temp_new_i32();
5721
5722 switch (opcode) {
5723 case 0x0: /* FMOV */
5724 tcg_gen_mov_i32(tcg_res, tcg_op);
5725 goto done;
5726 case 0x1: /* FABS */
5727 gen_helper_vfp_abss(tcg_res, tcg_op);
5728 goto done;
5729 case 0x2: /* FNEG */
5730 gen_helper_vfp_negs(tcg_res, tcg_op);
5731 goto done;
5732 case 0x3: /* FSQRT */
5733 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
5734 goto done;
5735 case 0x8: /* FRINTN */
5736 case 0x9: /* FRINTP */
5737 case 0xa: /* FRINTM */
5738 case 0xb: /* FRINTZ */
5739 case 0xc: /* FRINTA */
5740 rmode = arm_rmode_to_sf(opcode & 7);
5741 gen_fpst = gen_helper_rints;
5742 break;
5743 case 0xe: /* FRINTX */
5744 gen_fpst = gen_helper_rints_exact;
5745 break;
5746 case 0xf: /* FRINTI */
5747 gen_fpst = gen_helper_rints;
5748 break;
5749 case 0x10: /* FRINT32Z */
5750 rmode = float_round_to_zero;
5751 gen_fpst = gen_helper_frint32_s;
5752 break;
5753 case 0x11: /* FRINT32X */
5754 gen_fpst = gen_helper_frint32_s;
5755 break;
5756 case 0x12: /* FRINT64Z */
5757 rmode = float_round_to_zero;
5758 gen_fpst = gen_helper_frint64_s;
5759 break;
5760 case 0x13: /* FRINT64X */
5761 gen_fpst = gen_helper_frint64_s;
5762 break;
5763 default:
5764 g_assert_not_reached();
5765 }
5766
5767 fpst = get_fpstatus_ptr(false);
5768 if (rmode >= 0) {
5769 TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
5770 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5771 gen_fpst(tcg_res, tcg_op, fpst);
5772 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5773 tcg_temp_free_i32(tcg_rmode);
5774 } else {
5775 gen_fpst(tcg_res, tcg_op, fpst);
5776 }
5777 tcg_temp_free_ptr(fpst);
5778
5779 done:
5780 write_fp_sreg(s, rd, tcg_res);
5781 tcg_temp_free_i32(tcg_op);
5782 tcg_temp_free_i32(tcg_res);
5783 }
5784
5785 /* Floating-point data-processing (1 source) - double precision */
5786 static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
5787 {
5788 void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
5789 TCGv_i64 tcg_op, tcg_res;
5790 TCGv_ptr fpst;
5791 int rmode = -1;
5792
5793 switch (opcode) {
5794 case 0x0: /* FMOV */
5795 gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
5796 return;
5797 }
5798
5799 tcg_op = read_fp_dreg(s, rn);
5800 tcg_res = tcg_temp_new_i64();
5801
5802 switch (opcode) {
5803 case 0x1: /* FABS */
5804 gen_helper_vfp_absd(tcg_res, tcg_op);
5805 goto done;
5806 case 0x2: /* FNEG */
5807 gen_helper_vfp_negd(tcg_res, tcg_op);
5808 goto done;
5809 case 0x3: /* FSQRT */
5810 gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
5811 goto done;
5812 case 0x8: /* FRINTN */
5813 case 0x9: /* FRINTP */
5814 case 0xa: /* FRINTM */
5815 case 0xb: /* FRINTZ */
5816 case 0xc: /* FRINTA */
5817 rmode = arm_rmode_to_sf(opcode & 7);
5818 gen_fpst = gen_helper_rintd;
5819 break;
5820 case 0xe: /* FRINTX */
5821 gen_fpst = gen_helper_rintd_exact;
5822 break;
5823 case 0xf: /* FRINTI */
5824 gen_fpst = gen_helper_rintd;
5825 break;
5826 case 0x10: /* FRINT32Z */
5827 rmode = float_round_to_zero;
5828 gen_fpst = gen_helper_frint32_d;
5829 break;
5830 case 0x11: /* FRINT32X */
5831 gen_fpst = gen_helper_frint32_d;
5832 break;
5833 case 0x12: /* FRINT64Z */
5834 rmode = float_round_to_zero;
5835 gen_fpst = gen_helper_frint64_d;
5836 break;
5837 case 0x13: /* FRINT64X */
5838 gen_fpst = gen_helper_frint64_d;
5839 break;
5840 default:
5841 g_assert_not_reached();
5842 }
5843
5844 fpst = get_fpstatus_ptr(false);
5845 if (rmode >= 0) {
5846 TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
5847 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5848 gen_fpst(tcg_res, tcg_op, fpst);
5849 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5850 tcg_temp_free_i32(tcg_rmode);
5851 } else {
5852 gen_fpst(tcg_res, tcg_op, fpst);
5853 }
5854 tcg_temp_free_ptr(fpst);
5855
5856 done:
5857 write_fp_dreg(s, rd, tcg_res);
5858 tcg_temp_free_i64(tcg_op);
5859 tcg_temp_free_i64(tcg_res);
5860 }
5861
5862 static void handle_fp_fcvt(DisasContext *s, int opcode,
5863 int rd, int rn, int dtype, int ntype)
5864 {
5865 switch (ntype) {
5866 case 0x0:
5867 {
5868 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
5869 if (dtype == 1) {
5870 /* Single to double */
5871 TCGv_i64 tcg_rd = tcg_temp_new_i64();
5872 gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
5873 write_fp_dreg(s, rd, tcg_rd);
5874 tcg_temp_free_i64(tcg_rd);
5875 } else {
5876 /* Single to half */
5877 TCGv_i32 tcg_rd = tcg_temp_new_i32();
5878 TCGv_i32 ahp = get_ahp_flag();
5879 TCGv_ptr fpst = get_fpstatus_ptr(false);
5880
5881 gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
5882 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
5883 write_fp_sreg(s, rd, tcg_rd);
5884 tcg_temp_free_i32(tcg_rd);
5885 tcg_temp_free_i32(ahp);
5886 tcg_temp_free_ptr(fpst);
5887 }
5888 tcg_temp_free_i32(tcg_rn);
5889 break;
5890 }
5891 case 0x1:
5892 {
5893 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
5894 TCGv_i32 tcg_rd = tcg_temp_new_i32();
5895 if (dtype == 0) {
5896 /* Double to single */
5897 gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
5898 } else {
5899 TCGv_ptr fpst = get_fpstatus_ptr(false);
5900 TCGv_i32 ahp = get_ahp_flag();
5901 /* Double to half */
5902 gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
5903 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
5904 tcg_temp_free_ptr(fpst);
5905 tcg_temp_free_i32(ahp);
5906 }
5907 write_fp_sreg(s, rd, tcg_rd);
5908 tcg_temp_free_i32(tcg_rd);
5909 tcg_temp_free_i64(tcg_rn);
5910 break;
5911 }
5912 case 0x3:
5913 {
5914 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
5915 TCGv_ptr tcg_fpst = get_fpstatus_ptr(false);
5916 TCGv_i32 tcg_ahp = get_ahp_flag();
5917 tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
5918 if (dtype == 0) {
5919 /* Half to single */
5920 TCGv_i32 tcg_rd = tcg_temp_new_i32();
5921 gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
5922 write_fp_sreg(s, rd, tcg_rd);
5923 tcg_temp_free_i32(tcg_rd);
5924 } else {
5925 /* Half to double */
5926 TCGv_i64 tcg_rd = tcg_temp_new_i64();
5927 gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
5928 write_fp_dreg(s, rd, tcg_rd);
5929 tcg_temp_free_i64(tcg_rd);
5930 }
5931 tcg_temp_free_i32(tcg_rn);
5932 tcg_temp_free_ptr(tcg_fpst);
5933 tcg_temp_free_i32(tcg_ahp);
5934 break;
5935 }
5936 default:
5937 abort();
5938 }
5939 }
5940
5941 /* Floating point data-processing (1 source)
5942 * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
5943 * +---+---+---+-----------+------+---+--------+-----------+------+------+
5944 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
5945 * +---+---+---+-----------+------+---+--------+-----------+------+------+
5946 */
5947 static void disas_fp_1src(DisasContext *s, uint32_t insn)
5948 {
5949 int mos = extract32(insn, 29, 3);
5950 int type = extract32(insn, 22, 2);
5951 int opcode = extract32(insn, 15, 6);
5952 int rn = extract32(insn, 5, 5);
5953 int rd = extract32(insn, 0, 5);
5954
5955 if (mos) {
5956 unallocated_encoding(s);
5957 return;
5958 }
5959
5960 switch (opcode) {
5961 case 0x4: case 0x5: case 0x7:
5962 {
5963 /* FCVT between half, single and double precision */
5964 int dtype = extract32(opcode, 0, 2);
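        /* type == 2 is reserved, as is converting to the same
         * precision (dtype == type).
         */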
5965 if (type == 2 || dtype == type) {
5966 unallocated_encoding(s);
5967 return;
5968 }
5969 if (!fp_access_check(s)) {
5970 return;
5971 }
5972
5973 handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
5974 break;
5975 }
5976
5977 case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
5978 if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
5979 unallocated_encoding(s);
5980 return;
5981 }
5982 /* fall through */
5983 case 0x0 ... 0x3:
5984 case 0x8 ... 0xc:
5985 case 0xe ... 0xf:
5986 /* 32-to-32 and 64-to-64 ops */
5987 switch (type) {
5988 case 0:
5989 if (!fp_access_check(s)) {
5990 return;
5991 }
5992 handle_fp_1src_single(s, opcode, rd, rn);
5993 break;
5994 case 1:
5995 if (!fp_access_check(s)) {
5996 return;
5997 }
5998 handle_fp_1src_double(s, opcode, rd, rn);
5999 break;
6000 case 3:
6001 if (!dc_isar_feature(aa64_fp16, s)) {
6002 unallocated_encoding(s);
6003 return;
6004 }
6005
6006 if (!fp_access_check(s)) {
6007 return;
6008 }
6009 handle_fp_1src_half(s, opcode, rd, rn);
6010 break;
6011 default:
6012 unallocated_encoding(s);
6013 }
6014 break;
6015
6016 default:
6017 unallocated_encoding(s);
6018 break;
6019 }
6020 }
6021
6022 /* Floating-point data-processing (2 source) - single precision */
6023 static void handle_fp_2src_single(DisasContext *s, int opcode,
6024 int rd, int rn, int rm)
6025 {
6026 TCGv_i32 tcg_op1;
6027 TCGv_i32 tcg_op2;
6028 TCGv_i32 tcg_res;
6029 TCGv_ptr fpst;
6030
6031 tcg_res = tcg_temp_new_i32();
6032 fpst = get_fpstatus_ptr(false);
6033 tcg_op1 = read_fp_sreg(s, rn);
6034 tcg_op2 = read_fp_sreg(s, rm);
6035
6036 switch (opcode) {
6037 case 0x0: /* FMUL */
6038 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6039 break;
6040 case 0x1: /* FDIV */
6041 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
6042 break;
6043 case 0x2: /* FADD */
6044 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6045 break;
6046 case 0x3: /* FSUB */
6047 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
6048 break;
6049 case 0x4: /* FMAX */
6050 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6051 break;
6052 case 0x5: /* FMIN */
6053 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6054 break;
6055 case 0x6: /* FMAXNM */
6056 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6057 break;
6058 case 0x7: /* FMINNM */
6059 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6060 break;
6061 case 0x8: /* FNMUL */
6062 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6063 gen_helper_vfp_negs(tcg_res, tcg_res);
6064 break;
6065 }
6066
6067 write_fp_sreg(s, rd, tcg_res);
6068
6069 tcg_temp_free_ptr(fpst);
6070 tcg_temp_free_i32(tcg_op1);
6071 tcg_temp_free_i32(tcg_op2);
6072 tcg_temp_free_i32(tcg_res);
6073 }
6074
6075 /* Floating-point data-processing (2 source) - double precision */
6076 static void handle_fp_2src_double(DisasContext *s, int opcode,
6077 int rd, int rn, int rm)
6078 {
6079 TCGv_i64 tcg_op1;
6080 TCGv_i64 tcg_op2;
6081 TCGv_i64 tcg_res;
6082 TCGv_ptr fpst;
6083
6084 tcg_res = tcg_temp_new_i64();
6085 fpst = get_fpstatus_ptr(false);
6086 tcg_op1 = read_fp_dreg(s, rn);
6087 tcg_op2 = read_fp_dreg(s, rm);
6088
6089 switch (opcode) {
6090 case 0x0: /* FMUL */
6091 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6092 break;
6093 case 0x1: /* FDIV */
6094 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
6095 break;
6096 case 0x2: /* FADD */
6097 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6098 break;
6099 case 0x3: /* FSUB */
6100 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
6101 break;
6102 case 0x4: /* FMAX */
6103 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6104 break;
6105 case 0x5: /* FMIN */
6106 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6107 break;
6108 case 0x6: /* FMAXNM */
6109 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6110 break;
6111 case 0x7: /* FMINNM */
6112 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6113 break;
6114 case 0x8: /* FNMUL */
6115 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6116 gen_helper_vfp_negd(tcg_res, tcg_res);
6117 break;
6118 }
6119
6120 write_fp_dreg(s, rd, tcg_res);
6121
6122 tcg_temp_free_ptr(fpst);
6123 tcg_temp_free_i64(tcg_op1);
6124 tcg_temp_free_i64(tcg_op2);
6125 tcg_temp_free_i64(tcg_res);
6126 }
6127
6128 /* Floating-point data-processing (2 source) - half precision */
6129 static void handle_fp_2src_half(DisasContext *s, int opcode,
6130 int rd, int rn, int rm)
6131 {
6132 TCGv_i32 tcg_op1;
6133 TCGv_i32 tcg_op2;
6134 TCGv_i32 tcg_res;
6135 TCGv_ptr fpst;
6136
6137 tcg_res = tcg_temp_new_i32();
6138 fpst = get_fpstatus_ptr(true);
6139 tcg_op1 = read_fp_hreg(s, rn);
6140 tcg_op2 = read_fp_hreg(s, rm);
6141
6142 switch (opcode) {
6143 case 0x0: /* FMUL */
6144 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
6145 break;
6146 case 0x1: /* FDIV */
6147 gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
6148 break;
6149 case 0x2: /* FADD */
6150 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
6151 break;
6152 case 0x3: /* FSUB */
6153 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
6154 break;
6155 case 0x4: /* FMAX */
6156 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
6157 break;
6158 case 0x5: /* FMIN */
6159 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
6160 break;
6161 case 0x6: /* FMAXNM */
6162 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6163 break;
6164 case 0x7: /* FMINNM */
6165 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6166 break;
6167 case 0x8: /* FNMUL */
6168 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
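        /* negate the product by flipping the float16 sign bit */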
6169 tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
6170 break;
6171 default:
6172 g_assert_not_reached();
6173 }
6174
6175 write_fp_sreg(s, rd, tcg_res);
6176
6177 tcg_temp_free_ptr(fpst);
6178 tcg_temp_free_i32(tcg_op1);
6179 tcg_temp_free_i32(tcg_op2);
6180 tcg_temp_free_i32(tcg_res);
6181 }
6182
6183 /* Floating point data-processing (2 source)
6184 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
6185 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6186 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
6187 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6188 */
6189 static void disas_fp_2src(DisasContext *s, uint32_t insn)
6190 {
6191 int mos = extract32(insn, 29, 3);
6192 int type = extract32(insn, 22, 2);
6193 int rd = extract32(insn, 0, 5);
6194 int rn = extract32(insn, 5, 5);
6195 int rm = extract32(insn, 16, 5);
6196 int opcode = extract32(insn, 12, 4);
6197
6198 if (opcode > 8 || mos) {
6199 unallocated_encoding(s);
6200 return;
6201 }
6202
6203 switch (type) {
6204 case 0:
6205 if (!fp_access_check(s)) {
6206 return;
6207 }
6208 handle_fp_2src_single(s, opcode, rd, rn, rm);
6209 break;
6210 case 1:
6211 if (!fp_access_check(s)) {
6212 return;
6213 }
6214 handle_fp_2src_double(s, opcode, rd, rn, rm);
6215 break;
6216 case 3:
6217 if (!dc_isar_feature(aa64_fp16, s)) {
6218 unallocated_encoding(s);
6219 return;
6220 }
6221 if (!fp_access_check(s)) {
6222 return;
6223 }
6224 handle_fp_2src_half(s, opcode, rd, rn, rm);
6225 break;
6226 default:
6227 unallocated_encoding(s);
6228 }
6229 }
6230
6231 /* Floating-point data-processing (3 source) - single precision */
6232 static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
6233 int rd, int rn, int rm, int ra)
6234 {
6235 TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6236 TCGv_i32 tcg_res = tcg_temp_new_i32();
6237 TCGv_ptr fpst = get_fpstatus_ptr(false);
6238
6239 tcg_op1 = read_fp_sreg(s, rn);
6240 tcg_op2 = read_fp_sreg(s, rm);
6241 tcg_op3 = read_fp_sreg(s, ra);
6242
6243 /* These are fused multiply-add, and must be done as one
6244 * floating point operation with no rounding between the
6245 * multiplication and addition steps.
6246 * NB that doing the negations here as separate steps is
6247 * correct: an input NaN should come out with its sign bit
6248 * flipped if it is a negated input.
6249 */
6250 if (o1) {
6251 gen_helper_vfp_negs(tcg_op3, tcg_op3);
6252 }
6253
6254 if (o0 != o1) {
6255 gen_helper_vfp_negs(tcg_op1, tcg_op1);
6256 }
6257
6258 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6259
6260 write_fp_sreg(s, rd, tcg_res);
6261
6262 tcg_temp_free_ptr(fpst);
6263 tcg_temp_free_i32(tcg_op1);
6264 tcg_temp_free_i32(tcg_op2);
6265 tcg_temp_free_i32(tcg_op3);
6266 tcg_temp_free_i32(tcg_res);
6267 }
6268
6269 /* Floating-point data-processing (3 source) - double precision */
6270 static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
6271 int rd, int rn, int rm, int ra)
6272 {
6273 TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
6274 TCGv_i64 tcg_res = tcg_temp_new_i64();
6275 TCGv_ptr fpst = get_fpstatus_ptr(false);
6276
6277 tcg_op1 = read_fp_dreg(s, rn);
6278 tcg_op2 = read_fp_dreg(s, rm);
6279 tcg_op3 = read_fp_dreg(s, ra);
6280
6281 /* These are fused multiply-add, and must be done as one
6282 * floating point operation with no rounding between the
6283 * multiplication and addition steps.
6284 * NB that doing the negations here as separate steps is
6285 * correct: an input NaN should come out with its sign bit
6286 * flipped if it is a negated input.
6287 */
6288 if (o1) {
6289 gen_helper_vfp_negd(tcg_op3, tcg_op3);
6290 }
6291
6292 if (o0 != o1) {
6293 gen_helper_vfp_negd(tcg_op1, tcg_op1);
6294 }
6295
6296 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6297
6298 write_fp_dreg(s, rd, tcg_res);
6299
6300 tcg_temp_free_ptr(fpst);
6301 tcg_temp_free_i64(tcg_op1);
6302 tcg_temp_free_i64(tcg_op2);
6303 tcg_temp_free_i64(tcg_op3);
6304 tcg_temp_free_i64(tcg_res);
6305 }
6306
6307 /* Floating-point data-processing (3 source) - half precision */
6308 static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
6309 int rd, int rn, int rm, int ra)
6310 {
6311 TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6312 TCGv_i32 tcg_res = tcg_temp_new_i32();
6313 TCGv_ptr fpst = get_fpstatus_ptr(true);
6314
6315 tcg_op1 = read_fp_hreg(s, rn);
6316 tcg_op2 = read_fp_hreg(s, rm);
6317 tcg_op3 = read_fp_hreg(s, ra);
6318
6319 /* These are fused multiply-add, and must be done as one
6320 * floating point operation with no rounding between the
6321 * multiplication and addition steps.
6322 * NB that doing the negations here as separate steps is
6323 * correct: an input NaN should come out with its sign bit
6324 * flipped if it is a negated input.
6325 */
6326 if (o1) {
6327 tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
6328 }
6329
6330 if (o0 != o1) {
6331 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
6332 }
6333
6334 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6335
6336 write_fp_sreg(s, rd, tcg_res);
6337
6338 tcg_temp_free_ptr(fpst);
6339 tcg_temp_free_i32(tcg_op1);
6340 tcg_temp_free_i32(tcg_op2);
6341 tcg_temp_free_i32(tcg_op3);
6342 tcg_temp_free_i32(tcg_res);
6343 }
6344
6345 /* Floating point data-processing (3 source)
6346 * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
6347 * +---+---+---+-----------+------+----+------+----+------+------+------+
6348 * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
6349 * +---+---+---+-----------+------+----+------+----+------+------+------+
6350 */
6351 static void disas_fp_3src(DisasContext *s, uint32_t insn)
6352 {
6353 int mos = extract32(insn, 29, 3);
6354 int type = extract32(insn, 22, 2);
6355 int rd = extract32(insn, 0, 5);
6356 int rn = extract32(insn, 5, 5);
6357 int ra = extract32(insn, 10, 5);
6358 int rm = extract32(insn, 16, 5);
6359 bool o0 = extract32(insn, 15, 1);
6360 bool o1 = extract32(insn, 21, 1);
6361
6362 if (mos) {
6363 unallocated_encoding(s);
6364 return;
6365 }
6366
6367 switch (type) {
6368 case 0:
6369 if (!fp_access_check(s)) {
6370 return;
6371 }
6372 handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
6373 break;
6374 case 1:
6375 if (!fp_access_check(s)) {
6376 return;
6377 }
6378 handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
6379 break;
6380 case 3:
6381 if (!dc_isar_feature(aa64_fp16, s)) {
6382 unallocated_encoding(s);
6383 return;
6384 }
6385 if (!fp_access_check(s)) {
6386 return;
6387 }
6388 handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
6389 break;
6390 default:
6391 unallocated_encoding(s);
6392 }
6393 }
6394
6395 /* Floating point immediate
6396 * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
6397 * +---+---+---+-----------+------+---+------------+-------+------+------+
6398 * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
6399 * +---+---+---+-----------+------+---+------------+-------+------+------+
6400 */
6401 static void disas_fp_imm(DisasContext *s, uint32_t insn)
6402 {
6403 int rd = extract32(insn, 0, 5);
6404 int imm5 = extract32(insn, 5, 5);
6405 int imm8 = extract32(insn, 13, 8);
6406 int type = extract32(insn, 22, 2);
6407 int mos = extract32(insn, 29, 3);
6408 uint64_t imm;
6409 TCGv_i64 tcg_res;
6410 MemOp sz;
6411
6412 if (mos || imm5) {
6413 unallocated_encoding(s);
6414 return;
6415 }
6416
6417 switch (type) {
6418 case 0:
6419 sz = MO_32;
6420 break;
6421 case 1:
6422 sz = MO_64;
6423 break;
6424 case 3:
6425 sz = MO_16;
6426 if (dc_isar_feature(aa64_fp16, s)) {
6427 break;
6428 }
6429 /* fallthru */
6430 default:
6431 unallocated_encoding(s);
6432 return;
6433 }
6434
6435 if (!fp_access_check(s)) {
6436 return;
6437 }
6438
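    /* vfp_expand_imm() implements VFPExpandImm() from the ARM ARM;
     * e.g. imm8 == 0x70 expands to 1.0 in the selected precision.
     */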
6439 imm = vfp_expand_imm(sz, imm8);
6440
6441 tcg_res = tcg_const_i64(imm);
6442 write_fp_dreg(s, rd, tcg_res);
6443 tcg_temp_free_i64(tcg_res);
6444 }
6445
6446 /* Handle floating point <=> fixed point conversions. Note that we can
6447 * also deal with fp <=> integer conversions as a special case (scale == 64).
6448 * OPTME: consider handling that special case separately, or at least skipping
6449 * the call to scalbn in the helpers for zero shifts.
6450 */
6451 static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
6452 bool itof, int rmode, int scale, int sf, int type)
6453 {
6454 bool is_signed = !(opcode & 1);
6455 TCGv_ptr tcg_fpstatus;
6456 TCGv_i32 tcg_shift, tcg_single;
6457 TCGv_i64 tcg_double;
6458
6459 tcg_fpstatus = get_fpstatus_ptr(type == 3);
6460
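    /* The scale field encodes 64 - fbits, so tcg_shift is simply the
     * number of fraction bits: e.g. scale == 60 means 4 fraction bits
     * (FCVTZS multiplies by 16 before converting), and scale == 64 is
     * the plain integer conversion described above.
     */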
6461 tcg_shift = tcg_const_i32(64 - scale);
6462
6463 if (itof) {
6464 TCGv_i64 tcg_int = cpu_reg(s, rn);
6465 if (!sf) {
6466 TCGv_i64 tcg_extend = new_tmp_a64(s);
6467
6468 if (is_signed) {
6469 tcg_gen_ext32s_i64(tcg_extend, tcg_int);
6470 } else {
6471 tcg_gen_ext32u_i64(tcg_extend, tcg_int);
6472 }
6473
6474 tcg_int = tcg_extend;
6475 }
6476
6477 switch (type) {
6478 case 1: /* float64 */
6479 tcg_double = tcg_temp_new_i64();
6480 if (is_signed) {
6481 gen_helper_vfp_sqtod(tcg_double, tcg_int,
6482 tcg_shift, tcg_fpstatus);
6483 } else {
6484 gen_helper_vfp_uqtod(tcg_double, tcg_int,
6485 tcg_shift, tcg_fpstatus);
6486 }
6487 write_fp_dreg(s, rd, tcg_double);
6488 tcg_temp_free_i64(tcg_double);
6489 break;
6490
6491 case 0: /* float32 */
6492 tcg_single = tcg_temp_new_i32();
6493 if (is_signed) {
6494 gen_helper_vfp_sqtos(tcg_single, tcg_int,
6495 tcg_shift, tcg_fpstatus);
6496 } else {
6497 gen_helper_vfp_uqtos(tcg_single, tcg_int,
6498 tcg_shift, tcg_fpstatus);
6499 }
6500 write_fp_sreg(s, rd, tcg_single);
6501 tcg_temp_free_i32(tcg_single);
6502 break;
6503
6504 case 3: /* float16 */
6505 tcg_single = tcg_temp_new_i32();
6506 if (is_signed) {
6507 gen_helper_vfp_sqtoh(tcg_single, tcg_int,
6508 tcg_shift, tcg_fpstatus);
6509 } else {
6510 gen_helper_vfp_uqtoh(tcg_single, tcg_int,
6511 tcg_shift, tcg_fpstatus);
6512 }
6513 write_fp_sreg(s, rd, tcg_single);
6514 tcg_temp_free_i32(tcg_single);
6515 break;
6516
6517 default:
6518 g_assert_not_reached();
6519 }
6520 } else {
6521 TCGv_i64 tcg_int = cpu_reg(s, rd);
6522 TCGv_i32 tcg_rmode;
6523
6524 if (extract32(opcode, 2, 1)) {
6525 /* There are too many rounding modes to all fit into rmode,
6526 * so FCVTA[US] is a special case.
6527 */
6528 rmode = FPROUNDING_TIEAWAY;
6529 }
6530
6531 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6532
6533 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
6534
6535 switch (type) {
6536 case 1: /* float64 */
6537 tcg_double = read_fp_dreg(s, rn);
6538 if (is_signed) {
6539 if (!sf) {
6540 gen_helper_vfp_tosld(tcg_int, tcg_double,
6541 tcg_shift, tcg_fpstatus);
6542 } else {
6543 gen_helper_vfp_tosqd(tcg_int, tcg_double,
6544 tcg_shift, tcg_fpstatus);
6545 }
6546 } else {
6547 if (!sf) {
6548 gen_helper_vfp_tould(tcg_int, tcg_double,
6549 tcg_shift, tcg_fpstatus);
6550 } else {
6551 gen_helper_vfp_touqd(tcg_int, tcg_double,
6552 tcg_shift, tcg_fpstatus);
6553 }
6554 }
6555 if (!sf) {
6556 tcg_gen_ext32u_i64(tcg_int, tcg_int);
6557 }
6558 tcg_temp_free_i64(tcg_double);
6559 break;
6560
6561 case 0: /* float32 */
6562 tcg_single = read_fp_sreg(s, rn);
6563 if (sf) {
6564 if (is_signed) {
6565 gen_helper_vfp_tosqs(tcg_int, tcg_single,
6566 tcg_shift, tcg_fpstatus);
6567 } else {
6568 gen_helper_vfp_touqs(tcg_int, tcg_single,
6569 tcg_shift, tcg_fpstatus);
6570 }
6571 } else {
6572 TCGv_i32 tcg_dest = tcg_temp_new_i32();
6573 if (is_signed) {
6574 gen_helper_vfp_tosls(tcg_dest, tcg_single,
6575 tcg_shift, tcg_fpstatus);
6576 } else {
6577 gen_helper_vfp_touls(tcg_dest, tcg_single,
6578 tcg_shift, tcg_fpstatus);
6579 }
6580 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
6581 tcg_temp_free_i32(tcg_dest);
6582 }
6583 tcg_temp_free_i32(tcg_single);
6584 break;
6585
6586 case 3: /* float16 */
6587 tcg_single = read_fp_sreg(s, rn);
6588 if (sf) {
6589 if (is_signed) {
6590 gen_helper_vfp_tosqh(tcg_int, tcg_single,
6591 tcg_shift, tcg_fpstatus);
6592 } else {
6593 gen_helper_vfp_touqh(tcg_int, tcg_single,
6594 tcg_shift, tcg_fpstatus);
6595 }
6596 } else {
6597 TCGv_i32 tcg_dest = tcg_temp_new_i32();
6598 if (is_signed) {
6599 gen_helper_vfp_toslh(tcg_dest, tcg_single,
6600 tcg_shift, tcg_fpstatus);
6601 } else {
6602 gen_helper_vfp_toulh(tcg_dest, tcg_single,
6603 tcg_shift, tcg_fpstatus);
6604 }
6605 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
6606 tcg_temp_free_i32(tcg_dest);
6607 }
6608 tcg_temp_free_i32(tcg_single);
6609 break;
6610
6611 default:
6612 g_assert_not_reached();
6613 }
6614
6615 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
6616 tcg_temp_free_i32(tcg_rmode);
6617 }
6618
6619 tcg_temp_free_ptr(tcg_fpstatus);
6620 tcg_temp_free_i32(tcg_shift);
6621 }
6622
6623 /* Floating point <-> fixed point conversions
6624 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
6625 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
6626 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
6627 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
6628 */
6629 static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
6630 {
6631 int rd = extract32(insn, 0, 5);
6632 int rn = extract32(insn, 5, 5);
6633 int scale = extract32(insn, 10, 6);
6634 int opcode = extract32(insn, 16, 3);
6635 int rmode = extract32(insn, 19, 2);
6636 int type = extract32(insn, 22, 2);
6637 bool sbit = extract32(insn, 29, 1);
6638 bool sf = extract32(insn, 31, 1);
6639 bool itof;
6640
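    /* fbits (== 64 - scale) must not exceed the 32-bit register width,
     * so scale < 32 is reserved for the W-register (sf == 0) variants.
     */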
6641 if (sbit || (!sf && scale < 32)) {
6642 unallocated_encoding(s);
6643 return;
6644 }
6645
6646 switch (type) {
6647 case 0: /* float32 */
6648 case 1: /* float64 */
6649 break;
6650 case 3: /* float16 */
6651 if (dc_isar_feature(aa64_fp16, s)) {
6652 break;
6653 }
6654 /* fallthru */
6655 default:
6656 unallocated_encoding(s);
6657 return;
6658 }
6659
6660 switch ((rmode << 3) | opcode) {
6661 case 0x2: /* SCVTF */
6662 case 0x3: /* UCVTF */
6663 itof = true;
6664 break;
6665 case 0x18: /* FCVTZS */
6666 case 0x19: /* FCVTZU */
6667 itof = false;
6668 break;
6669 default:
6670 unallocated_encoding(s);
6671 return;
6672 }
6673
6674 if (!fp_access_check(s)) {
6675 return;
6676 }
6677
6678 handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
6679 }
6680
6681 static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
6682 {
6683 /* FMOV: gpr to or from float, double, or top half of quad fp reg,
6684 * without conversion.
6685 */
6686
6687 if (itof) {
6688 TCGv_i64 tcg_rn = cpu_reg(s, rn);
6689 TCGv_i64 tmp;
6690
6691 switch (type) {
6692 case 0:
6693 /* 32 bit */
6694 tmp = tcg_temp_new_i64();
6695 tcg_gen_ext32u_i64(tmp, tcg_rn);
6696 write_fp_dreg(s, rd, tmp);
6697 tcg_temp_free_i64(tmp);
6698 break;
6699 case 1:
6700 /* 64 bit */
6701 write_fp_dreg(s, rd, tcg_rn);
6702 break;
6703 case 2:
6704 /* 64 bit to top half. */
6705 tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
6706 clear_vec_high(s, true, rd);
6707 break;
6708 case 3:
6709 /* 16 bit */
6710 tmp = tcg_temp_new_i64();
6711 tcg_gen_ext16u_i64(tmp, tcg_rn);
6712 write_fp_dreg(s, rd, tmp);
6713 tcg_temp_free_i64(tmp);
6714 break;
6715 default:
6716 g_assert_not_reached();
6717 }
6718 } else {
6719 TCGv_i64 tcg_rd = cpu_reg(s, rd);
6720
6721 switch (type) {
6722 case 0:
6723 /* 32 bit */
6724 tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
6725 break;
6726 case 1:
6727 /* 64 bit */
6728 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
6729 break;
6730 case 2:
6731 /* 64 bits from top half */
6732 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
6733 break;
6734 case 3:
6735 /* 16 bit */
6736 tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
6737 break;
6738 default:
6739 g_assert_not_reached();
6740 }
6741 }
6742 }
6743
6744 static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
6745 {
6746 TCGv_i64 t = read_fp_dreg(s, rn);
6747 TCGv_ptr fpstatus = get_fpstatus_ptr(false);
6748
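    /* The helper packs the 32-bit result into the low half of t and the
     * value for cpu_ZF into the high half (Z is set iff that value is
     * zero); N, C and V are then cleared.
     */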
6749 gen_helper_fjcvtzs(t, t, fpstatus);
6750
6751 tcg_temp_free_ptr(fpstatus);
6752
6753 tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
6754 tcg_gen_extrh_i64_i32(cpu_ZF, t);
6755 tcg_gen_movi_i32(cpu_CF, 0);
6756 tcg_gen_movi_i32(cpu_NF, 0);
6757 tcg_gen_movi_i32(cpu_VF, 0);
6758
6759 tcg_temp_free_i64(t);
6760 }
6761
6762 /* Floating point <-> integer conversions
6763 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
6764 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
6765 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
6766 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
6767 */
6768 static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
6769 {
6770 int rd = extract32(insn, 0, 5);
6771 int rn = extract32(insn, 5, 5);
6772 int opcode = extract32(insn, 16, 3);
6773 int rmode = extract32(insn, 19, 2);
6774 int type = extract32(insn, 22, 2);
6775 bool sbit = extract32(insn, 29, 1);
6776 bool sf = extract32(insn, 31, 1);
6777 bool itof = false;
6778
6779 if (sbit) {
6780 goto do_unallocated;
6781 }
6782
6783 switch (opcode) {
6784 case 2: /* SCVTF */
6785 case 3: /* UCVTF */
6786 itof = true;
6787 /* fallthru */
6788 case 4: /* FCVTAS */
6789 case 5: /* FCVTAU */
6790 if (rmode != 0) {
6791 goto do_unallocated;
6792 }
6793 /* fallthru */
6794 case 0: /* FCVT[NPMZ]S */
6795 case 1: /* FCVT[NPMZ]U */
6796 switch (type) {
6797 case 0: /* float32 */
6798 case 1: /* float64 */
6799 break;
6800 case 3: /* float16 */
6801 if (!dc_isar_feature(aa64_fp16, s)) {
6802 goto do_unallocated;
6803 }
6804 break;
6805 default:
6806 goto do_unallocated;
6807 }
6808 if (!fp_access_check(s)) {
6809 return;
6810 }
6811 handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
6812 break;
6813
6814 default:
6815 switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
6816 case 0b01100110: /* FMOV half <-> 32-bit int */
6817 case 0b01100111:
6818 case 0b11100110: /* FMOV half <-> 64-bit int */
6819 case 0b11100111:
6820 if (!dc_isar_feature(aa64_fp16, s)) {
6821 goto do_unallocated;
6822 }
6823 /* fallthru */
6824 case 0b00000110: /* FMOV 32-bit */
6825 case 0b00000111:
6826 case 0b10100110: /* FMOV 64-bit */
6827 case 0b10100111:
6828 case 0b11001110: /* FMOV top half of 128-bit */
6829 case 0b11001111:
6830 if (!fp_access_check(s)) {
6831 return;
6832 }
6833 itof = opcode & 1;
6834 handle_fmov(s, rd, rn, type, itof);
6835 break;
6836
6837 case 0b00111110: /* FJCVTZS */
6838 if (!dc_isar_feature(aa64_jscvt, s)) {
6839 goto do_unallocated;
6840 } else if (fp_access_check(s)) {
6841 handle_fjcvtzs(s, rd, rn);
6842 }
6843 break;
6844
6845 default:
6846 do_unallocated:
6847 unallocated_encoding(s);
6848 return;
6849 }
6850 break;
6851 }
6852 }
6853
6854 /* FP-specific subcases of table C3-6 (SIMD and FP data processing)
6855 * 31 30 29 28 25 24 0
6856 * +---+---+---+---------+-----------------------------+
6857 * | | 0 | | 1 1 1 1 | |
6858 * +---+---+---+---------+-----------------------------+
6859 */
6860 static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
6861 {
6862 if (extract32(insn, 24, 1)) {
6863 /* Floating point data-processing (3 source) */
6864 disas_fp_3src(s, insn);
6865 } else if (extract32(insn, 21, 1) == 0) {
6866 /* Floating point to fixed point conversions */
6867 disas_fp_fixed_conv(s, insn);
6868 } else {
6869 switch (extract32(insn, 10, 2)) {
6870 case 1:
6871 /* Floating point conditional compare */
6872 disas_fp_ccomp(s, insn);
6873 break;
6874 case 2:
6875 /* Floating point data-processing (2 source) */
6876 disas_fp_2src(s, insn);
6877 break;
6878 case 3:
6879 /* Floating point conditional select */
6880 disas_fp_csel(s, insn);
6881 break;
6882 case 0:
6883 switch (ctz32(extract32(insn, 12, 4))) {
6884 case 0: /* [15:12] == xxx1 */
6885 /* Floating point immediate */
6886 disas_fp_imm(s, insn);
6887 break;
6888 case 1: /* [15:12] == xx10 */
6889 /* Floating point compare */
6890 disas_fp_compare(s, insn);
6891 break;
6892 case 2: /* [15:12] == x100 */
6893 /* Floating point data-processing (1 source) */
6894 disas_fp_1src(s, insn);
6895 break;
6896 case 3: /* [15:12] == 1000 */
6897 unallocated_encoding(s);
6898 break;
6899 default: /* [15:12] == 0000 */
6900 /* Floating point <-> integer conversions */
6901 disas_fp_int_conv(s, insn);
6902 break;
6903 }
6904 break;
6905 }
6906 }
6907 }
6908
6909 static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
6910 int pos)
6911 {
6912 /* Extract 64 bits from the middle of two concatenated 64 bit
6913 * vector register slices left:right. The extracted bits start
6914 * at 'pos' bits into the right (least significant) side.
6915 * We return the result in tcg_right, and guarantee not to
6916 * trash tcg_left.
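 * For example, with pos == 24 the result is (left << 40) | (right >> 24).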
6917 */
6918 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
6919 assert(pos > 0 && pos < 64);
6920
6921 tcg_gen_shri_i64(tcg_right, tcg_right, pos);
6922 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
6923 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
6924
6925 tcg_temp_free_i64(tcg_tmp);
6926 }
6927
6928 /* EXT
6929 * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0
6930 * +---+---+-------------+-----+---+------+---+------+---+------+------+
6931 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd |
6932 * +---+---+-------------+-----+---+------+---+------+---+------+------+
6933 */
6934 static void disas_simd_ext(DisasContext *s, uint32_t insn)
6935 {
6936 int is_q = extract32(insn, 30, 1);
6937 int op2 = extract32(insn, 22, 2);
6938 int imm4 = extract32(insn, 11, 4);
6939 int rm = extract32(insn, 16, 5);
6940 int rn = extract32(insn, 5, 5);
6941 int rd = extract32(insn, 0, 5);
6942 int pos = imm4 << 3;
6943 TCGv_i64 tcg_resl, tcg_resh;
6944
6945 if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
6946 unallocated_encoding(s);
6947 return;
6948 }
6949
6950 if (!fp_access_check(s)) {
6951 return;
6952 }
6953
6954 tcg_resh = tcg_temp_new_i64();
6955 tcg_resl = tcg_temp_new_i64();
6956
6957 /* Vd gets bits starting at pos bits into Vm:Vn. This is
6958 * either extracting 128 bits from a 128:128 concatenation, or
6959 * extracting 64 bits from a 64:64 concatenation.
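 * E.g. in the is_q case, pos == 72 (imm4 == 9) returns bits [199:72]
 * of the 256-bit value Vm:Vn, with Vn in the least significant half.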
6960 */
6961 if (!is_q) {
6962 read_vec_element(s, tcg_resl, rn, 0, MO_64);
6963 if (pos != 0) {
6964 read_vec_element(s, tcg_resh, rm, 0, MO_64);
6965 do_ext64(s, tcg_resh, tcg_resl, pos);
6966 }
6967 } else {
6968 TCGv_i64 tcg_hh;
6969 typedef struct {
6970 int reg;
6971 int elt;
6972 } EltPosns;
6973 EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
6974 EltPosns *elt = eltposns;
6975
6976 if (pos >= 64) {
6977 elt++;
6978 pos -= 64;
6979 }
6980
6981 read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
6982 elt++;
6983 read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
6984 elt++;
6985 if (pos != 0) {
6986 do_ext64(s, tcg_resh, tcg_resl, pos);
6987 tcg_hh = tcg_temp_new_i64();
6988 read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
6989 do_ext64(s, tcg_hh, tcg_resh, pos);
6990 tcg_temp_free_i64(tcg_hh);
6991 }
6992 }
6993
6994 write_vec_element(s, tcg_resl, rd, 0, MO_64);
6995 tcg_temp_free_i64(tcg_resl);
6996 if (is_q) {
6997 write_vec_element(s, tcg_resh, rd, 1, MO_64);
6998 }
6999 tcg_temp_free_i64(tcg_resh);
7000 clear_vec_high(s, is_q, rd);
7001 }
7002
7003 /* TBL/TBX
7004 * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0
7005 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
7006 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd |
7007 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
7008 */
7009 static void disas_simd_tb(DisasContext *s, uint32_t insn)
7010 {
7011 int op2 = extract32(insn, 22, 2);
7012 int is_q = extract32(insn, 30, 1);
7013 int rm = extract32(insn, 16, 5);
7014 int rn = extract32(insn, 5, 5);
7015 int rd = extract32(insn, 0, 5);
7016 int is_tblx = extract32(insn, 12, 1);
7017 int len = extract32(insn, 13, 2);
7018 TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
7019 TCGv_i32 tcg_regno, tcg_numregs;
7020
7021 if (op2 != 0) {
7022 unallocated_encoding(s);
7023 return;
7024 }
7025
7026 if (!fp_access_check(s)) {
7027 return;
7028 }
7029
7030 /* This does a table lookup: for every byte element in the input
7031 * we index into a table formed from up to four vector registers,
7032 * and then the output is the result of the lookups. Our helper
7033 * function does the lookup operation for a single 64 bit part of
7034 * the input.
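 * For TBX (is_tblx) an out-of-range index leaves the destination byte
 * unchanged, so the old value of Vd is loaded below; for TBL it is zeroed.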
7035 */
7036 tcg_resl = tcg_temp_new_i64();
7037 tcg_resh = NULL;
7038
7039 if (is_tblx) {
7040 read_vec_element(s, tcg_resl, rd, 0, MO_64);
7041 } else {
7042 tcg_gen_movi_i64(tcg_resl, 0);
7043 }
7044
7045 if (is_q) {
7046 tcg_resh = tcg_temp_new_i64();
7047 if (is_tblx) {
7048 read_vec_element(s, tcg_resh, rd, 1, MO_64);
7049 } else {
7050 tcg_gen_movi_i64(tcg_resh, 0);
7051 }
7052 }
7053
7054 tcg_idx = tcg_temp_new_i64();
7055 tcg_regno = tcg_const_i32(rn);
7056 tcg_numregs = tcg_const_i32(len + 1);
7057 read_vec_element(s, tcg_idx, rm, 0, MO_64);
7058 gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
7059 tcg_regno, tcg_numregs);
7060 if (is_q) {
7061 read_vec_element(s, tcg_idx, rm, 1, MO_64);
7062 gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
7063 tcg_regno, tcg_numregs);
7064 }
7065 tcg_temp_free_i64(tcg_idx);
7066 tcg_temp_free_i32(tcg_regno);
7067 tcg_temp_free_i32(tcg_numregs);
7068
7069 write_vec_element(s, tcg_resl, rd, 0, MO_64);
7070 tcg_temp_free_i64(tcg_resl);
7071
7072 if (is_q) {
7073 write_vec_element(s, tcg_resh, rd, 1, MO_64);
7074 tcg_temp_free_i64(tcg_resh);
7075 }
7076 clear_vec_high(s, is_q, rd);
7077 }
7078
7079 /* ZIP/UZP/TRN
7080 * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
7081 * +---+---+-------------+------+---+------+---+------------------+------+
7082 * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd |
7083 * +---+---+-------------+------+---+------+---+------------------+------+
7084 */
7085 static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
7086 {
7087 int rd = extract32(insn, 0, 5);
7088 int rn = extract32(insn, 5, 5);
7089 int rm = extract32(insn, 16, 5);
7090 int size = extract32(insn, 22, 2);
7091 /* opc field bits [1:0] indicate ZIP/UZP/TRN;
7092 * bit 2 indicates 1 vs 2 variant of the insn.
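 * E.g. ZIP1 interleaves the low halves of Vn and Vm: d[0] = n[0],
 * d[1] = m[0], d[2] = n[1], and so on; ZIP2 does the same with the
 * high halves.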
7093 */
7094 int opcode = extract32(insn, 12, 2);
7095 bool part = extract32(insn, 14, 1);
7096 bool is_q = extract32(insn, 30, 1);
7097 int esize = 8 << size;
7098 int i, ofs;
7099 int datasize = is_q ? 128 : 64;
7100 int elements = datasize / esize;
7101 TCGv_i64 tcg_res, tcg_resl, tcg_resh;
7102
7103 if (opcode == 0 || (size == 3 && !is_q)) {
7104 unallocated_encoding(s);
7105 return;
7106 }
7107
7108 if (!fp_access_check(s)) {
7109 return;
7110 }
7111
7112 tcg_resl = tcg_const_i64(0);
7113 tcg_resh = is_q ? tcg_const_i64(0) : NULL;
7114 tcg_res = tcg_temp_new_i64();
7115
7116 for (i = 0; i < elements; i++) {
7117 switch (opcode) {
7118 case 1: /* UZP1/2 */
7119 {
7120 int midpoint = elements / 2;
7121 if (i < midpoint) {
7122 read_vec_element(s, tcg_res, rn, 2 * i + part, size);
7123 } else {
7124 read_vec_element(s, tcg_res, rm,
7125 2 * (i - midpoint) + part, size);
7126 }
7127 break;
7128 }
7129 case 2: /* TRN1/2 */
7130 if (i & 1) {
7131 read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
7132 } else {
7133 read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
7134 }
7135 break;
7136 case 3: /* ZIP1/2 */
7137 {
7138 int base = part * elements / 2;
7139 if (i & 1) {
7140 read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
7141 } else {
7142 read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
7143 }
7144 break;
7145 }
7146 default:
7147 g_assert_not_reached();
7148 }
7149
7150 ofs = i * esize;
7151 if (ofs < 64) {
7152 tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
7153 tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
7154 } else {
7155 tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
7156 tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
7157 }
7158 }
7159
7160 tcg_temp_free_i64(tcg_res);
7161
7162 write_vec_element(s, tcg_resl, rd, 0, MO_64);
7163 tcg_temp_free_i64(tcg_resl);
7164
7165 if (is_q) {
7166 write_vec_element(s, tcg_resh, rd, 1, MO_64);
7167 tcg_temp_free_i64(tcg_resh);
7168 }
7169 clear_vec_high(s, is_q, rd);
7170 }
7171
7172 /*
7173 * do_reduction_op helper
7174 *
7175 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
7176 * important for correct NaN propagation that we do these
7177 * operations in exactly the order specified by the pseudocode.
7178 *
7179 * This is a recursive function; the TCG temps it returns should be
7180 * freed by the calling function once it is done with the values.
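 * For example, an 8-element reduction starts with vmap == 0xff and
 * splits it into 0x0f and 0xf0, recursing until each map has a single
 * bit (one element) set.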
7181 */
7182 static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
7183 int esize, int size, int vmap, TCGv_ptr fpst)
7184 {
7185 if (esize == size) {
7186 int element;
7187 MemOp msize = esize == 16 ? MO_16 : MO_32;
7188 TCGv_i32 tcg_elem;
7189
7190 /* We should have one register left here */
7191 assert(ctpop8(vmap) == 1);
7192 element = ctz32(vmap);
7193 assert(element < 8);
7194
7195 tcg_elem = tcg_temp_new_i32();
7196 read_vec_element_i32(s, tcg_elem, rn, element, msize);
7197 return tcg_elem;
7198 } else {
7199 int bits = size / 2;
7200 int shift = ctpop8(vmap) / 2;
7201 int vmap_lo = (vmap >> shift) & vmap;
7202 int vmap_hi = (vmap & ~vmap_lo);
7203 TCGv_i32 tcg_hi, tcg_lo, tcg_res;
7204
7205 tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
7206 tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
7207 tcg_res = tcg_temp_new_i32();
7208
7209 switch (fpopcode) {
7210 case 0x0c: /* fmaxnmv half-precision */
7211 gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
7212 break;
7213 case 0x0f: /* fmaxv half-precision */
7214 gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
7215 break;
7216 case 0x1c: /* fminnmv half-precision */
7217 gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
7218 break;
7219 case 0x1f: /* fminv half-precision */
7220 gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
7221 break;
7222 case 0x2c: /* fmaxnmv */
7223 gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
7224 break;
7225 case 0x2f: /* fmaxv */
7226 gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
7227 break;
7228 case 0x3c: /* fminnmv */
7229 gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
7230 break;
7231 case 0x3f: /* fminv */
7232 gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
7233 break;
7234 default:
7235 g_assert_not_reached();
7236 }
7237
7238 tcg_temp_free_i32(tcg_hi);
7239 tcg_temp_free_i32(tcg_lo);
7240 return tcg_res;
7241 }
7242 }
7243
7244 /* AdvSIMD across lanes
7245 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
7246 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
7247 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
7248 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
7249 */
7250 static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
7251 {
7252 int rd = extract32(insn, 0, 5);
7253 int rn = extract32(insn, 5, 5);
7254 int size = extract32(insn, 22, 2);
7255 int opcode = extract32(insn, 12, 5);
7256 bool is_q = extract32(insn, 30, 1);
7257 bool is_u = extract32(insn, 29, 1);
7258 bool is_fp = false;
7259 bool is_min = false;
7260 int esize;
7261 int elements;
7262 int i;
7263 TCGv_i64 tcg_res, tcg_elt;
7264
7265 switch (opcode) {
7266 case 0x1b: /* ADDV */
7267 if (is_u) {
7268 unallocated_encoding(s);
7269 return;
7270 }
7271 /* fall through */
7272 case 0x3: /* SADDLV, UADDLV */
7273 case 0xa: /* SMAXV, UMAXV */
7274 case 0x1a: /* SMINV, UMINV */
7275 if (size == 3 || (size == 2 && !is_q)) {
7276 unallocated_encoding(s);
7277 return;
7278 }
7279 break;
7280 case 0xc: /* FMAXNMV, FMINNMV */
7281 case 0xf: /* FMAXV, FMINV */
7282 /* Bit 1 of size field encodes min vs max and the actual size
7283 * depends on the encoding of the U bit. If U is not set (and FP16 is
7284 * enabled) then we do half-precision float instead of single
7285 * precision.
7286 */
7287 is_min = extract32(size, 1, 1);
7288 is_fp = true;
7289 if (!is_u && dc_isar_feature(aa64_fp16, s)) {
7290 size = 1;
7291 } else if (!is_u || !is_q || extract32(size, 0, 1)) {
7292 unallocated_encoding(s);
7293 return;
7294 } else {
7295 size = 2;
7296 }
7297 break;
7298 default:
7299 unallocated_encoding(s);
7300 return;
7301 }
7302
7303 if (!fp_access_check(s)) {
7304 return;
7305 }
7306
7307 esize = 8 << size;
7308 elements = (is_q ? 128 : 64) / esize;
7309
7310 tcg_res = tcg_temp_new_i64();
7311 tcg_elt = tcg_temp_new_i64();
7312
7313 /* These instructions operate across all lanes of a vector
7314 * to produce a single result. We can guarantee that a 64
7315 * bit intermediate is sufficient:
7316 * + for [US]ADDLV the maximum element size is 32 bits, and
7317 * the result type is 64 bits
7318 * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
7319 * same as the element size, which is 32 bits at most
7320 * For the integer operations we can choose to work at 64
7321 * or 32 bits and truncate at the end; for simplicity
7322 * we use 64 bits always. The floating point
7323 * ops do require 32 bit intermediates, though.
7324 */
7325 if (!is_fp) {
7326 read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
7327
7328 for (i = 1; i < elements; i++) {
7329 read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
7330
7331 switch (opcode) {
7332 case 0x03: /* SADDLV / UADDLV */
7333 case 0x1b: /* ADDV */
7334 tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
7335 break;
7336 case 0x0a: /* SMAXV / UMAXV */
7337 if (is_u) {
7338 tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
7339 } else {
7340 tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
7341 }
7342 break;
7343 case 0x1a: /* SMINV / UMINV */
7344 if (is_u) {
7345 tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
7346 } else {
7347 tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
7348 }
7349 break;
7350 default:
7351 g_assert_not_reached();
7352 }
7353
7354 }
7355 } else {
7356 /* Floating point vector reduction ops which work across 32
7357 * bit (single) or 16 bit (half-precision) intermediates.
7358 * Note that correct NaN propagation requires that we do these
7359 * operations in exactly the order specified by the pseudocode.
7360 */
7361 TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);
7362 int fpopcode = opcode | is_min << 4 | is_u << 5;
7363 int vmap = (1 << elements) - 1;
7364 TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
7365 (is_q ? 128 : 64), vmap, fpst);
7366 tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
7367 tcg_temp_free_i32(tcg_res32);
7368 tcg_temp_free_ptr(fpst);
7369 }
7370
7371 tcg_temp_free_i64(tcg_elt);
7372
7373 /* Now truncate the result to the width required for the final output */
7374 if (opcode == 0x03) {
7375 /* SADDLV, UADDLV: result is 2*esize */
7376 size++;
7377 }
7378
7379 switch (size) {
7380 case 0:
7381 tcg_gen_ext8u_i64(tcg_res, tcg_res);
7382 break;
7383 case 1:
7384 tcg_gen_ext16u_i64(tcg_res, tcg_res);
7385 break;
7386 case 2:
7387 tcg_gen_ext32u_i64(tcg_res, tcg_res);
7388 break;
7389 case 3:
7390 break;
7391 default:
7392 g_assert_not_reached();
7393 }
7394
7395 write_fp_dreg(s, rd, tcg_res);
7396 tcg_temp_free_i64(tcg_res);
7397 }
7398
7399 /* DUP (Element, Vector)
7400 *
7401 * 31 30 29 21 20 16 15 10 9 5 4 0
7402 * +---+---+-------------------+--------+-------------+------+------+
7403 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
7404 * +---+---+-------------------+--------+-------------+------+------+
7405 *
7406 * size: encoded in imm5 (see ARM ARM LowestSetBit())
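 * index: encoded in imm5<4:size+1>; e.g. imm5 == 0b01100 selects
 * 32-bit elements at index 1.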
7407 */
7408 static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
7409 int imm5)
7410 {
7411 int size = ctz32(imm5);
7412 int index;
7413
7414 if (size > 3 || (size == 3 && !is_q)) {
7415 unallocated_encoding(s);
7416 return;
7417 }
7418
7419 if (!fp_access_check(s)) {
7420 return;
7421 }
7422
7423 index = imm5 >> (size + 1);
7424 tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
7425 vec_reg_offset(s, rn, index, size),
7426 is_q ? 16 : 8, vec_full_reg_size(s));
7427 }
7428
7429 /* DUP (element, scalar)
7430 * 31 21 20 16 15 10 9 5 4 0
7431 * +-----------------------+--------+-------------+------+------+
7432 * | 0 1 0 1 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
7433 * +-----------------------+--------+-------------+------+------+
7434 */
7435 static void handle_simd_dupes(DisasContext *s, int rd, int rn,
7436 int imm5)
7437 {
7438 int size = ctz32(imm5);
7439 int index;
7440 TCGv_i64 tmp;
7441
7442 if (size > 3) {
7443 unallocated_encoding(s);
7444 return;
7445 }
7446
7447 if (!fp_access_check(s)) {
7448 return;
7449 }
7450
7451 index = imm5 >> (size + 1);
7452
7453 /* This instruction just extracts the specified element and
7454 * zero-extends it into the bottom of the destination register.
7455 */
7456 tmp = tcg_temp_new_i64();
7457 read_vec_element(s, tmp, rn, index, size);
7458 write_fp_dreg(s, rd, tmp);
7459 tcg_temp_free_i64(tmp);
7460 }
7461
7462 /* DUP (General)
7463 *
7464 * 31 30 29 21 20 16 15 10 9 5 4 0
7465 * +---+---+-------------------+--------+-------------+------+------+
7466 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 1 1 | Rn | Rd |
7467 * +---+---+-------------------+--------+-------------+------+------+
7468 *
7469 * size: encoded in imm5 (see ARM ARM LowestSetBit())
7470 */
7471 static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
7472 int imm5)
7473 {
7474 int size = ctz32(imm5);
7475 uint32_t dofs, oprsz, maxsz;
7476
7477 if (size > 3 || ((size == 3) && !is_q)) {
7478 unallocated_encoding(s);
7479 return;
7480 }
7481
7482 if (!fp_access_check(s)) {
7483 return;
7484 }
7485
7486 dofs = vec_full_reg_offset(s, rd);
7487 oprsz = is_q ? 16 : 8;
7488 maxsz = vec_full_reg_size(s);
7489
7490 tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
7491 }
7492
7493 /* INS (Element)
7494 *
7495 * 31 21 20 16 15 14 11 10 9 5 4 0
7496 * +-----------------------+--------+------------+---+------+------+
7497 * | 0 1 1 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
7498 * +-----------------------+--------+------------+---+------+------+
7499 *
7500 * size: encoded in imm5 (see ARM ARM LowestSetBit())
7501 * index: encoded in imm5<4:size+1>
7502 */
7503 static void handle_simd_inse(DisasContext *s, int rd, int rn,
7504 int imm4, int imm5)
7505 {
7506 int size = ctz32(imm5);
7507 int src_index, dst_index;
7508 TCGv_i64 tmp;
7509
7510 if (size > 3) {
7511 unallocated_encoding(s);
7512 return;
7513 }
7514
7515 if (!fp_access_check(s)) {
7516 return;
7517 }
7518
7519 dst_index = extract32(imm5, 1+size, 5);
7520 src_index = extract32(imm4, size, 4);
7521
7522 tmp = tcg_temp_new_i64();
7523
7524 read_vec_element(s, tmp, rn, src_index, size);
7525 write_vec_element(s, tmp, rd, dst_index, size);
7526
7527 tcg_temp_free_i64(tmp);
7528
7529 /* INS is considered a 128-bit write for SVE. */
7530 clear_vec_high(s, true, rd);
7531 }
7532
7533
7534 /* INS (General)
7535 *
7536 * 31 21 20 16 15 10 9 5 4 0
7537 * +-----------------------+--------+-------------+------+------+
7538 * | 0 1 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 1 1 1 | Rn | Rd |
7539 * +-----------------------+--------+-------------+------+------+
7540 *
7541 * size: encoded in imm5 (see ARM ARM LowestSetBit())
7542 * index: encoded in imm5<4:size+1>
7543 */
7544 static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
7545 {
7546 int size = ctz32(imm5);
7547 int idx;
7548
7549 if (size > 3) {
7550 unallocated_encoding(s);
7551 return;
7552 }
7553
7554 if (!fp_access_check(s)) {
7555 return;
7556 }
7557
7558 idx = extract32(imm5, 1 + size, 4 - size);
7559 write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
7560
7561 /* INS is considered a 128-bit write for SVE. */
7562 clear_vec_high(s, true, rd);
7563 }
7564
7565 /*
7566 * UMOV (General)
7567 * SMOV (General)
7568 *
7569 * 31 30 29 21 20 16 15 12 10 9 5 4 0
7570 * +---+---+-------------------+--------+-------------+------+------+
7571 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 1 U 1 1 | Rn | Rd |
7572 * +---+---+-------------------+--------+-------------+------+------+
7573 *
7574 * U: unsigned when set
7575 * size: encoded in imm5 (see ARM ARM LowestSetBit())
7576 */
7577 static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
7578 int rn, int rd, int imm5)
7579 {
7580 int size = ctz32(imm5);
7581 int element;
7582 TCGv_i64 tcg_rd;
7583
7584 /* Check for UnallocatedEncodings */
7585 if (is_signed) {
7586 if (size > 2 || (size == 2 && !is_q)) {
7587 unallocated_encoding(s);
7588 return;
7589 }
7590 } else {
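        /* UMOV: 8/16/32-bit elements read into a W register (is_q clear),
         * 64-bit elements into an X register (is_q set); other
         * combinations are reserved.
         */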
7591 if (size > 3
7592 || (size < 3 && is_q)
7593 || (size == 3 && !is_q)) {
7594 unallocated_encoding(s);
7595 return;
7596 }
7597 }
7598
7599 if (!fp_access_check(s)) {
7600 return;
7601 }
7602
7603 element = extract32(imm5, 1+size, 4);
7604
7605 tcg_rd = cpu_reg(s, rd);
7606 read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
7607 if (is_signed && !is_q) {
7608 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
7609 }
7610 }
7611
7612 /* AdvSIMD copy
7613 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
7614 * +---+---+----+-----------------+------+---+------+---+------+------+
7615 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
7616 * +---+---+----+-----------------+------+---+------+---+------+------+
7617 */
7618 static void disas_simd_copy(DisasContext *s, uint32_t insn)
7619 {
7620 int rd = extract32(insn, 0, 5);
7621 int rn = extract32(insn, 5, 5);
7622 int imm4 = extract32(insn, 11, 4);
7623 int op = extract32(insn, 29, 1);
7624 int is_q = extract32(insn, 30, 1);
7625 int imm5 = extract32(insn, 16, 5);
7626
7627 if (op) {
7628 if (is_q) {
7629 /* INS (element) */
7630 handle_simd_inse(s, rd, rn, imm4, imm5);
7631 } else {
7632 unallocated_encoding(s);
7633 }
7634 } else {
7635 switch (imm4) {
7636 case 0:
7637 /* DUP (element - vector) */
7638 handle_simd_dupe(s, is_q, rd, rn, imm5);
7639 break;
7640 case 1:
7641 /* DUP (general) */
7642 handle_simd_dupg(s, is_q, rd, rn, imm5);
7643 break;
7644 case 3:
7645 if (is_q) {
7646 /* INS (general) */
7647 handle_simd_insg(s, rd, rn, imm5);
7648 } else {
7649 unallocated_encoding(s);
7650 }
7651 break;
7652 case 5:
7653 case 7:
7654 /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
7655 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
7656 break;
7657 default:
7658 unallocated_encoding(s);
7659 break;
7660 }
7661 }
7662 }
7663
7664 /* AdvSIMD modified immediate
7665 * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0
7666 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
7667 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd |
7668 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
7669 *
7670 * There are a number of operations that can be carried out here:
7671 * MOVI - move (shifted) imm into register
7672 * MVNI - move inverted (shifted) imm into register
7673 * ORR - bitwise OR of (shifted) imm with register
7674 * BIC - bitwise clear of (shifted) imm with register
7675 * With ARMv8.2 we also have:
7676 * FMOV half-precision
7677 */
7678 static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
7679 {
7680 int rd = extract32(insn, 0, 5);
7681 int cmode = extract32(insn, 12, 4);
7682 int cmode_3_1 = extract32(cmode, 1, 3);
7683 int cmode_0 = extract32(cmode, 0, 1);
7684 int o2 = extract32(insn, 11, 1);
7685 uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
7686 bool is_neg = extract32(insn, 29, 1);
7687 bool is_q = extract32(insn, 30, 1);
7688 uint64_t imm = 0;
7689
7690 if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
7691 /* Check for FMOV (vector, immediate) - half-precision */
7692 if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
7693 unallocated_encoding(s);
7694 return;
7695 }
7696 }
7697
7698 if (!fp_access_check(s)) {
7699 return;
7700 }
7701
7702 /* See AdvSIMDExpandImm() in ARM ARM */
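    /* E.g. MOVI with cmode == 0b0010 and abcdefgh == 0xa5 yields
     * imm == 0x0000a500_0000a500 (0x0000a500 replicated into both
     * 32-bit halves).
     */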
7703 switch (cmode_3_1) {
7704 case 0: /* Replicate(Zeros(24):imm8, 2) */
7705 case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
7706 case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
7707 case 3: /* Replicate(imm8:Zeros(24), 2) */
7708 {
7709 int shift = cmode_3_1 * 8;
7710 imm = bitfield_replicate(abcdefgh << shift, 32);
7711 break;
7712 }
7713 case 4: /* Replicate(Zeros(8):imm8, 4) */
7714 case 5: /* Replicate(imm8:Zeros(8), 4) */
7715 {
7716 int shift = (cmode_3_1 & 0x1) * 8;
7717 imm = bitfield_replicate(abcdefgh << shift, 16);
7718 break;
7719 }
7720 case 6:
7721 if (cmode_0) {
7722 /* Replicate(Zeros(8):imm8:Ones(16), 2) */
7723 imm = (abcdefgh << 16) | 0xffff;
7724 } else {
7725 /* Replicate(Zeros(16):imm8:Ones(8), 2) */
7726 imm = (abcdefgh << 8) | 0xff;
7727 }
7728 imm = bitfield_replicate(imm, 32);
7729 break;
7730 case 7:
7731 if (!cmode_0 && !is_neg) {
7732 imm = bitfield_replicate(abcdefgh, 8);
7733 } else if (!cmode_0 && is_neg) {
7734 int i;
7735 imm = 0;
7736 for (i = 0; i < 8; i++) {
7737 if ((abcdefgh) & (1 << i)) {
7738 imm |= 0xffULL << (i * 8);
7739 }
7740 }
7741 } else if (cmode_0) {
7742 if (is_neg) {
7743 imm = (abcdefgh & 0x3f) << 48;
7744 if (abcdefgh & 0x80) {
7745 imm |= 0x8000000000000000ULL;
7746 }
7747 if (abcdefgh & 0x40) {
7748 imm |= 0x3fc0000000000000ULL;
7749 } else {
7750 imm |= 0x4000000000000000ULL;
7751 }
7752 } else {
7753 if (o2) {
7754 /* FMOV (vector, immediate) - half-precision */
7755 imm = vfp_expand_imm(MO_16, abcdefgh);
7756 /* now duplicate across the lanes */
7757 imm = bitfield_replicate(imm, 16);
7758 } else {
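                /* FMOV (vector, immediate) - single-precision; this is
                 * VFPExpandImm for a 32-bit value, e.g. abcdefgh == 0x70
                 * gives 1.0f, replicated into both halves below.
                 */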
7759 imm = (abcdefgh & 0x3f) << 19;
7760 if (abcdefgh & 0x80) {
7761 imm |= 0x80000000;
7762 }
7763 if (abcdefgh & 0x40) {
7764 imm |= 0x3e000000;
7765 } else {
7766 imm |= 0x40000000;
7767 }
7768 imm |= (imm << 32);
7769 }
7770 }
7771 }
7772 break;
7773 default:
7774 fprintf(stderr, "%s: cmode_3_1: %x\n", __func__, cmode_3_1);
7775 g_assert_not_reached();
7776 }
7777
7778 if (cmode_3_1 != 7 && is_neg) {
7779 imm = ~imm;
7780 }
7781
7782 if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
7783 /* MOVI or MVNI, with MVNI negation handled above. */
7784 tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
7785 vec_full_reg_size(s), imm);
7786 } else {
7787 /* ORR or BIC, with BIC negation to AND handled above. */
7788 if (is_neg) {
7789 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
7790 } else {
7791 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
7792 }
7793 }
7794 }
7795
7796 /* AdvSIMD scalar copy
7797 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
7798 * +-----+----+-----------------+------+---+------+---+------+------+
7799 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
7800 * +-----+----+-----------------+------+---+------+---+------+------+
7801 */
7802 static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
7803 {
7804 int rd = extract32(insn, 0, 5);
7805 int rn = extract32(insn, 5, 5);
7806 int imm4 = extract32(insn, 11, 4);
7807 int imm5 = extract32(insn, 16, 5);
7808 int op = extract32(insn, 29, 1);
7809
7810 if (op != 0 || imm4 != 0) {
7811 unallocated_encoding(s);
7812 return;
7813 }
7814
7815 /* DUP (element, scalar) */
7816 handle_simd_dupes(s, rd, rn, imm5);
7817 }
7818
7819 /* AdvSIMD scalar pairwise
7820 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
7821 * +-----+---+-----------+------+-----------+--------+-----+------+------+
7822 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
7823 * +-----+---+-----------+------+-----------+--------+-----+------+------+
7824 */
7825 static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
7826 {
7827 int u = extract32(insn, 29, 1);
7828 int size = extract32(insn, 22, 2);
7829 int opcode = extract32(insn, 12, 5);
7830 int rn = extract32(insn, 5, 5);
7831 int rd = extract32(insn, 0, 5);
7832 TCGv_ptr fpst;
7833
7834 /* For some ops (the FP ones), size[1] is part of the encoding.
7835 * For ADDP, strictly speaking, it is not, but size[1] is always 1 for
7836 * valid encodings.
7837 */
7838 opcode |= (extract32(size, 1, 1) << 5);
7839
7840 switch (opcode) {
7841 case 0x3b: /* ADDP */
7842 if (u || size != 3) {
7843 unallocated_encoding(s);
7844 return;
7845 }
7846 if (!fp_access_check(s)) {
7847 return;
7848 }
7849
7850 fpst = NULL;
7851 break;
7852 case 0xc: /* FMAXNMP */
7853 case 0xd: /* FADDP */
7854 case 0xf: /* FMAXP */
7855 case 0x2c: /* FMINNMP */
7856 case 0x2f: /* FMINP */
7857 /* FP op, size[0] selects 32 or 64 bit */
7858 if (!u) {
7859 if (!dc_isar_feature(aa64_fp16, s)) {
7860 unallocated_encoding(s);
7861 return;
7862 } else {
7863 size = MO_16;
7864 }
7865 } else {
7866 size = extract32(size, 0, 1) ? MO_64 : MO_32;
7867 }
7868
7869 if (!fp_access_check(s)) {
7870 return;
7871 }
7872
7873 fpst = get_fpstatus_ptr(size == MO_16);
7874 break;
7875 default:
7876 unallocated_encoding(s);
7877 return;
7878 }
7879
7880 if (size == MO_64) {
7881 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7882 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7883 TCGv_i64 tcg_res = tcg_temp_new_i64();
7884
7885 read_vec_element(s, tcg_op1, rn, 0, MO_64);
7886 read_vec_element(s, tcg_op2, rn, 1, MO_64);
7887
7888 switch (opcode) {
7889 case 0x3b: /* ADDP */
7890 tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
7891 break;
7892 case 0xc: /* FMAXNMP */
7893 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7894 break;
7895 case 0xd: /* FADDP */
7896 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
7897 break;
7898 case 0xf: /* FMAXP */
7899 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
7900 break;
7901 case 0x2c: /* FMINNMP */
7902 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7903 break;
7904 case 0x2f: /* FMINP */
7905 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
7906 break;
7907 default:
7908 g_assert_not_reached();
7909 }
7910
7911 write_fp_dreg(s, rd, tcg_res);
7912
7913 tcg_temp_free_i64(tcg_op1);
7914 tcg_temp_free_i64(tcg_op2);
7915 tcg_temp_free_i64(tcg_res);
7916 } else {
7917 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
7918 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
7919 TCGv_i32 tcg_res = tcg_temp_new_i32();
7920
7921 read_vec_element_i32(s, tcg_op1, rn, 0, size);
7922 read_vec_element_i32(s, tcg_op2, rn, 1, size);
7923
7924 if (size == MO_16) {
7925 switch (opcode) {
7926 case 0xc: /* FMAXNMP */
7927 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
7928 break;
7929 case 0xd: /* FADDP */
7930 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
7931 break;
7932 case 0xf: /* FMAXP */
7933 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
7934 break;
7935 case 0x2c: /* FMINNMP */
7936 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
7937 break;
7938 case 0x2f: /* FMINP */
7939 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
7940 break;
7941 default:
7942 g_assert_not_reached();
7943 }
7944 } else {
7945 switch (opcode) {
7946 case 0xc: /* FMAXNMP */
7947 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
7948 break;
7949 case 0xd: /* FADDP */
7950 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
7951 break;
7952 case 0xf: /* FMAXP */
7953 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
7954 break;
7955 case 0x2c: /* FMINNMP */
7956 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
7957 break;
7958 case 0x2f: /* FMINP */
7959 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
7960 break;
7961 default:
7962 g_assert_not_reached();
7963 }
7964 }
7965
7966 write_fp_sreg(s, rd, tcg_res);
7967
7968 tcg_temp_free_i32(tcg_op1);
7969 tcg_temp_free_i32(tcg_op2);
7970 tcg_temp_free_i32(tcg_res);
7971 }
7972
7973 if (fpst) {
7974 tcg_temp_free_ptr(fpst);
7975 }
7976 }
7977
7978 /*
7979 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
7980 *
7981 * This handles the common shifting code and is used by both
7982 * the vector and scalar code.
7983 */
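/* Illustration: for SRSHR #2 on the value 6 the caller passes
 * tcg_rnd = 1 << (2 - 1) = 2, so we compute (6 + 2) >> 2 = 2,
 * where the unrounded SSHR #2 would give 6 >> 2 = 1. The
 * extended path below exists because adding the rounding
 * constant to a 64-bit source can carry out of bit 63.
 */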
7984 static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
7985 TCGv_i64 tcg_rnd, bool accumulate,
7986 bool is_u, int size, int shift)
7987 {
7988 bool extended_result = false;
7989 bool round = tcg_rnd != NULL;
7990 int ext_lshift = 0;
7991 TCGv_i64 tcg_src_hi;
7992
7993 if (round && size == 3) {
7994 extended_result = true;
7995 ext_lshift = 64 - shift;
7996 tcg_src_hi = tcg_temp_new_i64();
7997 } else if (shift == 64) {
7998 if (!accumulate && is_u) {
7999 /* result is zero */
8000 tcg_gen_movi_i64(tcg_res, 0);
8001 return;
8002 }
8003 }
8004
8005 /* Deal with the rounding step */
8006 if (round) {
8007 if (extended_result) {
8008 TCGv_i64 tcg_zero = tcg_const_i64(0);
8009 if (!is_u) {
8010 /* take care of sign extending tcg_res */
8011 tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
8012 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
8013 tcg_src, tcg_src_hi,
8014 tcg_rnd, tcg_zero);
8015 } else {
8016 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
8017 tcg_src, tcg_zero,
8018 tcg_rnd, tcg_zero);
8019 }
8020 tcg_temp_free_i64(tcg_zero);
8021 } else {
8022 tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
8023 }
8024 }
8025
8026 /* Now do the shift right */
8027 if (round && extended_result) {
8028 /* extended case, >64 bit precision required */
8029 if (ext_lshift == 0) {
8030 /* special case, only high bits matter */
8031 tcg_gen_mov_i64(tcg_src, tcg_src_hi);
8032 } else {
8033 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
8034 tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
8035 tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
8036 }
8037 } else {
8038 if (is_u) {
8039 if (shift == 64) {
8040 /* essentially shifting in 64 zeros */
8041 tcg_gen_movi_i64(tcg_src, 0);
8042 } else {
8043 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
8044 }
8045 } else {
8046 if (shift == 64) {
8047 /* effectively extending the sign-bit */
8048 tcg_gen_sari_i64(tcg_src, tcg_src, 63);
8049 } else {
8050 tcg_gen_sari_i64(tcg_src, tcg_src, shift);
8051 }
8052 }
8053 }
8054
8055 if (accumulate) {
8056 tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
8057 } else {
8058 tcg_gen_mov_i64(tcg_res, tcg_src);
8059 }
8060
8061 if (extended_result) {
8062 tcg_temp_free_i64(tcg_src_hi);
8063 }
8064 }
8065
8066 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
8067 static void handle_scalar_simd_shri(DisasContext *s,
8068 bool is_u, int immh, int immb,
8069 int opcode, int rn, int rd)
8070 {
8071 const int size = 3;
8072 int immhb = immh << 3 | immb;
8073 int shift = 2 * (8 << size) - immhb;
8074 bool accumulate = false;
8075 bool round = false;
8076 bool insert = false;
8077 TCGv_i64 tcg_rn;
8078 TCGv_i64 tcg_rd;
8079 TCGv_i64 tcg_round;
8080
8081 if (!extract32(immh, 3, 1)) {
8082 unallocated_encoding(s);
8083 return;
8084 }
8085
8086 if (!fp_access_check(s)) {
8087 return;
8088 }
8089
8090 switch (opcode) {
8091 case 0x02: /* SSRA / USRA (accumulate) */
8092 accumulate = true;
8093 break;
8094 case 0x04: /* SRSHR / URSHR (rounding) */
8095 round = true;
8096 break;
8097 case 0x06: /* SRSRA / URSRA (accum + rounding) */
8098 accumulate = round = true;
8099 break;
8100 case 0x08: /* SRI */
8101 insert = true;
8102 break;
8103 }
8104
8105 if (round) {
8106 uint64_t round_const = 1ULL << (shift - 1);
8107 tcg_round = tcg_const_i64(round_const);
8108 } else {
8109 tcg_round = NULL;
8110 }
8111
8112 tcg_rn = read_fp_dreg(s, rn);
8113 tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8114
8115 if (insert) {
8116 /* shift count same as element size is valid but does nothing;
8117 * special case to avoid potential shift by 64.
8118 */
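/* e.g. SRI #8 on a 64-bit element shifts the source right by 8
 * and deposits its low 56 bits into bits [55:0] of Rd, leaving
 * the top 8 bits of Rd intact.
 */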
8119 int esize = 8 << size;
8120 if (shift != esize) {
8121 tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
8122 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
8123 }
8124 } else {
8125 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8126 accumulate, is_u, size, shift);
8127 }
8128
8129 write_fp_dreg(s, rd, tcg_rd);
8130
8131 tcg_temp_free_i64(tcg_rn);
8132 tcg_temp_free_i64(tcg_rd);
8133 if (round) {
8134 tcg_temp_free_i64(tcg_round);
8135 }
8136 }
8137
8138 /* SHL/SLI - Scalar shift left */
8139 static void handle_scalar_simd_shli(DisasContext *s, bool insert,
8140 int immh, int immb, int opcode,
8141 int rn, int rd)
8142 {
8143 int size = 32 - clz32(immh) - 1;
8144 int immhb = immh << 3 | immb;
8145 int shift = immhb - (8 << size);
8146 TCGv_i64 tcg_rn;
8147 TCGv_i64 tcg_rd;
8148
8149 if (!extract32(immh, 3, 1)) {
8150 unallocated_encoding(s);
8151 return;
8152 }
8153
8154 if (!fp_access_check(s)) {
8155 return;
8156 }
8157
8158 tcg_rn = read_fp_dreg(s, rn);
8159 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8160
8161 if (insert) {
8162 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
8163 } else {
8164 tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
8165 }
8166
8167 write_fp_dreg(s, rd, tcg_rd);
8168
8169 tcg_temp_free_i64(tcg_rn);
8170 tcg_temp_free_i64(tcg_rd);
8171 }
8172
8173 /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
8174 * (signed/unsigned) narrowing */
8175 static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
8176 bool is_u_shift, bool is_u_narrow,
8177 int immh, int immb, int opcode,
8178 int rn, int rd)
8179 {
8180 int immhb = immh << 3 | immb;
8181 int size = 32 - clz32(immh) - 1;
8182 int esize = 8 << size;
8183 int shift = (2 * esize) - immhb;
8184 int elements = is_scalar ? 1 : (64 / esize);
8185 bool round = extract32(opcode, 0, 1);
8186 MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
8187 TCGv_i64 tcg_rn, tcg_rd, tcg_round;
8188 TCGv_i32 tcg_rd_narrowed;
8189 TCGv_i64 tcg_final;
8190
8191 static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
8192 { gen_helper_neon_narrow_sat_s8,
8193 gen_helper_neon_unarrow_sat8 },
8194 { gen_helper_neon_narrow_sat_s16,
8195 gen_helper_neon_unarrow_sat16 },
8196 { gen_helper_neon_narrow_sat_s32,
8197 gen_helper_neon_unarrow_sat32 },
8198 { NULL, NULL },
8199 };
8200 static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
8201 gen_helper_neon_narrow_sat_u8,
8202 gen_helper_neon_narrow_sat_u16,
8203 gen_helper_neon_narrow_sat_u32,
8204 NULL
8205 };
8206 NeonGenNarrowEnvFn *narrowfn;
8207
8208 int i;
8209
8210 assert(size < 4);
8211
8212 if (extract32(immh, 3, 1)) {
8213 unallocated_encoding(s);
8214 return;
8215 }
8216
8217 if (!fp_access_check(s)) {
8218 return;
8219 }
8220
8221 if (is_u_shift) {
8222 narrowfn = unsigned_narrow_fns[size];
8223 } else {
8224 narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
8225 }
8226
8227 tcg_rn = tcg_temp_new_i64();
8228 tcg_rd = tcg_temp_new_i64();
8229 tcg_rd_narrowed = tcg_temp_new_i32();
8230 tcg_final = tcg_const_i64(0);
8231
8232 if (round) {
8233 uint64_t round_const = 1ULL << (shift - 1);
8234 tcg_round = tcg_const_i64(round_const);
8235 } else {
8236 tcg_round = NULL;
8237 }
8238
8239 for (i = 0; i < elements; i++) {
8240 read_vec_element(s, tcg_rn, rn, i, ldop);
8241 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8242 false, is_u_shift, size+1, shift);
8243 narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
8244 tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
8245 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
8246 }
8247
8248 if (!is_q) {
8249 write_vec_element(s, tcg_final, rd, 0, MO_64);
8250 } else {
8251 write_vec_element(s, tcg_final, rd, 1, MO_64);
8252 }
8253
8254 if (round) {
8255 tcg_temp_free_i64(tcg_round);
8256 }
8257 tcg_temp_free_i64(tcg_rn);
8258 tcg_temp_free_i64(tcg_rd);
8259 tcg_temp_free_i32(tcg_rd_narrowed);
8260 tcg_temp_free_i64(tcg_final);
8261
8262 clear_vec_high(s, is_q, rd);
8263 }
8264
8265 /* SQSHLU, UQSHL, SQSHL: saturating left shifts */
8266 static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
8267 bool src_unsigned, bool dst_unsigned,
8268 int immh, int immb, int rn, int rd)
8269 {
8270 int immhb = immh << 3 | immb;
8271 int size = 32 - clz32(immh) - 1;
8272 int shift = immhb - (8 << size);
8273 int pass;
8274
8275 assert(immh != 0);
8276 assert(!(scalar && is_q));
8277
8278 if (!scalar) {
8279 if (!is_q && extract32(immh, 3, 1)) {
8280 unallocated_encoding(s);
8281 return;
8282 }
8283
8284 /* Since we use the variable-shift helpers we must
8285 * replicate the shift count into each element of
8286 * the tcg_shift value.
8287 */
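/* e.g. with size == 0 (bytes) and shift == 3 this builds
 * 0x03030303, one copy of the count per 8-bit lane of the
 * 32-bit shift value passed to the helper.
 */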
8288 switch (size) {
8289 case 0:
8290 shift |= shift << 8;
8291 /* fall through */
8292 case 1:
8293 shift |= shift << 16;
8294 break;
8295 case 2:
8296 case 3:
8297 break;
8298 default:
8299 g_assert_not_reached();
8300 }
8301 }
8302
8303 if (!fp_access_check(s)) {
8304 return;
8305 }
8306
8307 if (size == 3) {
8308 TCGv_i64 tcg_shift = tcg_const_i64(shift);
8309 static NeonGenTwo64OpEnvFn * const fns[2][2] = {
8310 { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
8311 { NULL, gen_helper_neon_qshl_u64 },
8312 };
8313 NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
8314 int maxpass = is_q ? 2 : 1;
8315
8316 for (pass = 0; pass < maxpass; pass++) {
8317 TCGv_i64 tcg_op = tcg_temp_new_i64();
8318
8319 read_vec_element(s, tcg_op, rn, pass, MO_64);
8320 genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
8321 write_vec_element(s, tcg_op, rd, pass, MO_64);
8322
8323 tcg_temp_free_i64(tcg_op);
8324 }
8325 tcg_temp_free_i64(tcg_shift);
8326 clear_vec_high(s, is_q, rd);
8327 } else {
8328 TCGv_i32 tcg_shift = tcg_const_i32(shift);
8329 static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
8330 {
8331 { gen_helper_neon_qshl_s8,
8332 gen_helper_neon_qshl_s16,
8333 gen_helper_neon_qshl_s32 },
8334 { gen_helper_neon_qshlu_s8,
8335 gen_helper_neon_qshlu_s16,
8336 gen_helper_neon_qshlu_s32 }
8337 }, {
8338 { NULL, NULL, NULL },
8339 { gen_helper_neon_qshl_u8,
8340 gen_helper_neon_qshl_u16,
8341 gen_helper_neon_qshl_u32 }
8342 }
8343 };
8344 NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
8345 MemOp memop = scalar ? size : MO_32;
8346 int maxpass = scalar ? 1 : is_q ? 4 : 2;
8347
8348 for (pass = 0; pass < maxpass; pass++) {
8349 TCGv_i32 tcg_op = tcg_temp_new_i32();
8350
8351 read_vec_element_i32(s, tcg_op, rn, pass, memop);
8352 genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
8353 if (scalar) {
8354 switch (size) {
8355 case 0:
8356 tcg_gen_ext8u_i32(tcg_op, tcg_op);
8357 break;
8358 case 1:
8359 tcg_gen_ext16u_i32(tcg_op, tcg_op);
8360 break;
8361 case 2:
8362 break;
8363 default:
8364 g_assert_not_reached();
8365 }
8366 write_fp_sreg(s, rd, tcg_op);
8367 } else {
8368 write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
8369 }
8370
8371 tcg_temp_free_i32(tcg_op);
8372 }
8373 tcg_temp_free_i32(tcg_shift);
8374
8375 if (!scalar) {
8376 clear_vec_high(s, is_q, rd);
8377 }
8378 }
8379 }
8380
8381 /* Common vector code for handling integer to FP conversion */
8382 static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
8383 int elements, int is_signed,
8384 int fracbits, int size)
8385 {
8386 TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16);
8387 TCGv_i32 tcg_shift = NULL;
8388
8389 MemOp mop = size | (is_signed ? MO_SIGN : 0);
8390 int pass;
8391
8392 if (fracbits || size == MO_64) {
8393 tcg_shift = tcg_const_i32(fracbits);
8394 }
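/* The MO_64 helpers below always take a shift argument, so an
 * integer (zero-fracbits) conversion still passes an explicit
 * shift of 0.
 */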
8395
8396 if (size == MO_64) {
8397 TCGv_i64 tcg_int64 = tcg_temp_new_i64();
8398 TCGv_i64 tcg_double = tcg_temp_new_i64();
8399
8400 for (pass = 0; pass < elements; pass++) {
8401 read_vec_element(s, tcg_int64, rn, pass, mop);
8402
8403 if (is_signed) {
8404 gen_helper_vfp_sqtod(tcg_double, tcg_int64,
8405 tcg_shift, tcg_fpst);
8406 } else {
8407 gen_helper_vfp_uqtod(tcg_double, tcg_int64,
8408 tcg_shift, tcg_fpst);
8409 }
8410 if (elements == 1) {
8411 write_fp_dreg(s, rd, tcg_double);
8412 } else {
8413 write_vec_element(s, tcg_double, rd, pass, MO_64);
8414 }
8415 }
8416
8417 tcg_temp_free_i64(tcg_int64);
8418 tcg_temp_free_i64(tcg_double);
8419
8420 } else {
8421 TCGv_i32 tcg_int32 = tcg_temp_new_i32();
8422 TCGv_i32 tcg_float = tcg_temp_new_i32();
8423
8424 for (pass = 0; pass < elements; pass++) {
8425 read_vec_element_i32(s, tcg_int32, rn, pass, mop);
8426
8427 switch (size) {
8428 case MO_32:
8429 if (fracbits) {
8430 if (is_signed) {
8431 gen_helper_vfp_sltos(tcg_float, tcg_int32,
8432 tcg_shift, tcg_fpst);
8433 } else {
8434 gen_helper_vfp_ultos(tcg_float, tcg_int32,
8435 tcg_shift, tcg_fpst);
8436 }
8437 } else {
8438 if (is_signed) {
8439 gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
8440 } else {
8441 gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
8442 }
8443 }
8444 break;
8445 case MO_16:
8446 if (fracbits) {
8447 if (is_signed) {
8448 gen_helper_vfp_sltoh(tcg_float, tcg_int32,
8449 tcg_shift, tcg_fpst);
8450 } else {
8451 gen_helper_vfp_ultoh(tcg_float, tcg_int32,
8452 tcg_shift, tcg_fpst);
8453 }
8454 } else {
8455 if (is_signed) {
8456 gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
8457 } else {
8458 gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
8459 }
8460 }
8461 break;
8462 default:
8463 g_assert_not_reached();
8464 }
8465
8466 if (elements == 1) {
8467 write_fp_sreg(s, rd, tcg_float);
8468 } else {
8469 write_vec_element_i32(s, tcg_float, rd, pass, size);
8470 }
8471 }
8472
8473 tcg_temp_free_i32(tcg_int32);
8474 tcg_temp_free_i32(tcg_float);
8475 }
8476
8477 tcg_temp_free_ptr(tcg_fpst);
8478 if (tcg_shift) {
8479 tcg_temp_free_i32(tcg_shift);
8480 }
8481
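/* elements << size is the number of bytes written: anything
 * short of a full 16-byte write must zero the high half of Rd.
 */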
8482 clear_vec_high(s, elements << size == 16, rd);
8483 }
8484
8485 /* UCVTF/SCVTF - Integer to FP conversion */
8486 static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
8487 bool is_q, bool is_u,
8488 int immh, int immb, int opcode,
8489 int rn, int rd)
8490 {
8491 int size, elements, fracbits;
8492 int immhb = immh << 3 | immb;
8493
8494 if (immh & 8) {
8495 size = MO_64;
8496 if (!is_scalar && !is_q) {
8497 unallocated_encoding(s);
8498 return;
8499 }
8500 } else if (immh & 4) {
8501 size = MO_32;
8502 } else if (immh & 2) {
8503 size = MO_16;
8504 if (!dc_isar_feature(aa64_fp16, s)) {
8505 unallocated_encoding(s);
8506 return;
8507 }
8508 } else {
8509 /* immh == 0 would be a failure of the decode logic */
8510 g_assert(immh == 1);
8511 unallocated_encoding(s);
8512 return;
8513 }
8514
8515 if (is_scalar) {
8516 elements = 1;
8517 } else {
8518 elements = (8 << is_q) >> size;
8519 }
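/* e.g. a 32-bit SCVTF with immh:immb == 0111:000 (immhb == 56)
 * gives fracbits == 64 - 56 == 8, i.e. the result is the input
 * divided by 2^8.
 */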
8520 fracbits = (16 << size) - immhb;
8521
8522 if (!fp_access_check(s)) {
8523 return;
8524 }
8525
8526 handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
8527 }
8528
8529 /* FCVTZS, FCVTZU - FP to fixed-point conversion */
8530 static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
8531 bool is_q, bool is_u,
8532 int immh, int immb, int rn, int rd)
8533 {
8534 int immhb = immh << 3 | immb;
8535 int pass, size, fracbits;
8536 TCGv_ptr tcg_fpstatus;
8537 TCGv_i32 tcg_rmode, tcg_shift;
8538
8539 if (immh & 0x8) {
8540 size = MO_64;
8541 if (!is_scalar && !is_q) {
8542 unallocated_encoding(s);
8543 return;
8544 }
8545 } else if (immh & 0x4) {
8546 size = MO_32;
8547 } else if (immh & 0x2) {
8548 size = MO_16;
8549 if (!dc_isar_feature(aa64_fp16, s)) {
8550 unallocated_encoding(s);
8551 return;
8552 }
8553 } else {
8554 /* Should have split out AdvSIMD modified immediate earlier. */
8555 assert(immh == 1);
8556 unallocated_encoding(s);
8557 return;
8558 }
8559
8560 if (!fp_access_check(s)) {
8561 return;
8562 }
8563
8564 assert(!(is_scalar && is_q));
8565
8566 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
8567 tcg_fpstatus = get_fpstatus_ptr(size == MO_16);
8568 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
8569 fracbits = (16 << size) - immhb;
8570 tcg_shift = tcg_const_i32(fracbits);
8571
8572 if (size == MO_64) {
8573 int maxpass = is_scalar ? 1 : 2;
8574
8575 for (pass = 0; pass < maxpass; pass++) {
8576 TCGv_i64 tcg_op = tcg_temp_new_i64();
8577
8578 read_vec_element(s, tcg_op, rn, pass, MO_64);
8579 if (is_u) {
8580 gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8581 } else {
8582 gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8583 }
8584 write_vec_element(s, tcg_op, rd, pass, MO_64);
8585 tcg_temp_free_i64(tcg_op);
8586 }
8587 clear_vec_high(s, is_q, rd);
8588 } else {
8589 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
8590 int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
8591
8592 switch (size) {
8593 case MO_16:
8594 if (is_u) {
8595 fn = gen_helper_vfp_touhh;
8596 } else {
8597 fn = gen_helper_vfp_toshh;
8598 }
8599 break;
8600 case MO_32:
8601 if (is_u) {
8602 fn = gen_helper_vfp_touls;
8603 } else {
8604 fn = gen_helper_vfp_tosls;
8605 }
8606 break;
8607 default:
8608 g_assert_not_reached();
8609 }
8610
8611 for (pass = 0; pass < maxpass; pass++) {
8612 TCGv_i32 tcg_op = tcg_temp_new_i32();
8613
8614 read_vec_element_i32(s, tcg_op, rn, pass, size);
8615 fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8616 if (is_scalar) {
8617 write_fp_sreg(s, rd, tcg_op);
8618 } else {
8619 write_vec_element_i32(s, tcg_op, rd, pass, size);
8620 }
8621 tcg_temp_free_i32(tcg_op);
8622 }
8623 if (!is_scalar) {
8624 clear_vec_high(s, is_q, rd);
8625 }
8626 }
8627
8628 tcg_temp_free_ptr(tcg_fpstatus);
8629 tcg_temp_free_i32(tcg_shift);
8630 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
8631 tcg_temp_free_i32(tcg_rmode);
8632 }
8633
8634 /* AdvSIMD scalar shift by immediate
8635 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
8636 * +-----+---+-------------+------+------+--------+---+------+------+
8637 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
8638 * +-----+---+-------------+------+------+--------+---+------+------+
8639 *
8640 * This is the scalar version, so it works on fixed-size registers.
8641 */
8642 static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
8643 {
8644 int rd = extract32(insn, 0, 5);
8645 int rn = extract32(insn, 5, 5);
8646 int opcode = extract32(insn, 11, 5);
8647 int immb = extract32(insn, 16, 3);
8648 int immh = extract32(insn, 19, 4);
8649 bool is_u = extract32(insn, 29, 1);
8650
8651 if (immh == 0) {
8652 unallocated_encoding(s);
8653 return;
8654 }
8655
8656 switch (opcode) {
8657 case 0x08: /* SRI */
8658 if (!is_u) {
8659 unallocated_encoding(s);
8660 return;
8661 }
8662 /* fall through */
8663 case 0x00: /* SSHR / USHR */
8664 case 0x02: /* SSRA / USRA */
8665 case 0x04: /* SRSHR / URSHR */
8666 case 0x06: /* SRSRA / URSRA */
8667 handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
8668 break;
8669 case 0x0a: /* SHL / SLI */
8670 handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
8671 break;
8672 case 0x1c: /* SCVTF, UCVTF */
8673 handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
8674 opcode, rn, rd);
8675 break;
8676 case 0x10: /* SQSHRUN, SQSHRUN2 */
8677 case 0x11: /* SQRSHRUN, SQRSHRUN2 */
8678 if (!is_u) {
8679 unallocated_encoding(s);
8680 return;
8681 }
8682 handle_vec_simd_sqshrn(s, true, false, false, true,
8683 immh, immb, opcode, rn, rd);
8684 break;
8685 case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
8686 case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
8687 handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
8688 immh, immb, opcode, rn, rd);
8689 break;
8690 case 0xc: /* SQSHLU */
8691 if (!is_u) {
8692 unallocated_encoding(s);
8693 return;
8694 }
8695 handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
8696 break;
8697 case 0xe: /* SQSHL, UQSHL */
8698 handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
8699 break;
8700 case 0x1f: /* FCVTZS, FCVTZU */
8701 handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
8702 break;
8703 default:
8704 unallocated_encoding(s);
8705 break;
8706 }
8707 }
8708
8709 /* AdvSIMD scalar three different
8710 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
8711 * +-----+---+-----------+------+---+------+--------+-----+------+------+
8712 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
8713 * +-----+---+-----------+------+---+------+--------+-----+------+------+
8714 */
8715 static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
8716 {
8717 bool is_u = extract32(insn, 29, 1);
8718 int size = extract32(insn, 22, 2);
8719 int opcode = extract32(insn, 12, 4);
8720 int rm = extract32(insn, 16, 5);
8721 int rn = extract32(insn, 5, 5);
8722 int rd = extract32(insn, 0, 5);
8723
8724 if (is_u) {
8725 unallocated_encoding(s);
8726 return;
8727 }
8728
8729 switch (opcode) {
8730 case 0x9: /* SQDMLAL, SQDMLAL2 */
8731 case 0xb: /* SQDMLSL, SQDMLSL2 */
8732 case 0xd: /* SQDMULL, SQDMULL2 */
8733 if (size == 0 || size == 3) {
8734 unallocated_encoding(s);
8735 return;
8736 }
8737 break;
8738 default:
8739 unallocated_encoding(s);
8740 return;
8741 }
8742
8743 if (!fp_access_check(s)) {
8744 return;
8745 }
8746
8747 if (size == 2) {
8748 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8749 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8750 TCGv_i64 tcg_res = tcg_temp_new_i64();
8751
8752 read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
8753 read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
8754
8755 tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
8756 gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
8757
8758 switch (opcode) {
8759 case 0xd: /* SQDMULL, SQDMULL2 */
8760 break;
8761 case 0xb: /* SQDMLSL, SQDMLSL2 */
8762 tcg_gen_neg_i64(tcg_res, tcg_res);
8763 /* fall through */
8764 case 0x9: /* SQDMLAL, SQDMLAL2 */
8765 read_vec_element(s, tcg_op1, rd, 0, MO_64);
8766 gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
8767 tcg_res, tcg_op1);
8768 break;
8769 default:
8770 g_assert_not_reached();
8771 }
8772
8773 write_fp_dreg(s, rd, tcg_res);
8774
8775 tcg_temp_free_i64(tcg_op1);
8776 tcg_temp_free_i64(tcg_op2);
8777 tcg_temp_free_i64(tcg_res);
8778 } else {
8779 TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
8780 TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
8781 TCGv_i64 tcg_res = tcg_temp_new_i64();
8782
8783 gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
8784 gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
8785
8786 switch (opcode) {
8787 case 0xd: /* SQDMULL, SQDMULL2 */
8788 break;
8789 case 0xb: /* SQDMLSL, SQDMLSL2 */
8790 gen_helper_neon_negl_u32(tcg_res, tcg_res);
8791 /* fall through */
8792 case 0x9: /* SQDMLAL, SQDMLAL2 */
8793 {
8794 TCGv_i64 tcg_op3 = tcg_temp_new_i64();
8795 read_vec_element(s, tcg_op3, rd, 0, MO_32);
8796 gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
8797 tcg_res, tcg_op3);
8798 tcg_temp_free_i64(tcg_op3);
8799 break;
8800 }
8801 default:
8802 g_assert_not_reached();
8803 }
8804
8805 tcg_gen_ext32u_i64(tcg_res, tcg_res);
8806 write_fp_dreg(s, rd, tcg_res);
8807
8808 tcg_temp_free_i32(tcg_op1);
8809 tcg_temp_free_i32(tcg_op2);
8810 tcg_temp_free_i64(tcg_res);
8811 }
8812 }
8813
8814 static void handle_3same_64(DisasContext *s, int opcode, bool u,
8815 TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
8816 {
8817 /* Handle 64x64->64 opcodes which are shared between the scalar
8818 * and vector 3-same groups. We cover every opcode where size == 3
8819 * is valid in either the three-reg-same (integer, not pairwise)
8820 * or scalar-three-reg-same groups.
8821 */
8822 TCGCond cond;
8823
8824 switch (opcode) {
8825 case 0x1: /* SQADD */
8826 if (u) {
8827 gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8828 } else {
8829 gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8830 }
8831 break;
8832 case 0x5: /* SQSUB */
8833 if (u) {
8834 gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8835 } else {
8836 gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8837 }
8838 break;
8839 case 0x6: /* CMGT, CMHI */
8840 /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
8841 * We implement this using setcond (test) and then negating.
8842 */
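/* e.g. CMGT with Rn = 5, Rm = 3: setcond writes 1 and the
 * negation turns it into all-ones; a false test stays 0.
 */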
8843 cond = u ? TCG_COND_GTU : TCG_COND_GT;
8844 do_cmop:
8845 tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
8846 tcg_gen_neg_i64(tcg_rd, tcg_rd);
8847 break;
8848 case 0x7: /* CMGE, CMHS */
8849 cond = u ? TCG_COND_GEU : TCG_COND_GE;
8850 goto do_cmop;
8851 case 0x11: /* CMTST, CMEQ */
8852 if (u) {
8853 cond = TCG_COND_EQ;
8854 goto do_cmop;
8855 }
8856 gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
8857 break;
8858 case 0x8: /* SSHL, USHL */
8859 if (u) {
8860 gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
8861 } else {
8862 gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
8863 }
8864 break;
8865 case 0x9: /* SQSHL, UQSHL */
8866 if (u) {
8867 gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8868 } else {
8869 gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8870 }
8871 break;
8872 case 0xa: /* SRSHL, URSHL */
8873 if (u) {
8874 gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
8875 } else {
8876 gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
8877 }
8878 break;
8879 case 0xb: /* SQRSHL, UQRSHL */
8880 if (u) {
8881 gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8882 } else {
8883 gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8884 }
8885 break;
8886 case 0x10: /* ADD, SUB */
8887 if (u) {
8888 tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
8889 } else {
8890 tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
8891 }
8892 break;
8893 default:
8894 g_assert_not_reached();
8895 }
8896 }
8897
8898 /* Handle the 3-same-operands float operations; shared by the scalar
8899 * and vector encodings. The caller must filter out any encodings
8900 * not allocated for the encoding it is dealing with.
8901 */
8902 static void handle_3same_float(DisasContext *s, int size, int elements,
8903 int fpopcode, int rd, int rn, int rm)
8904 {
8905 int pass;
8906 TCGv_ptr fpst = get_fpstatus_ptr(false);
8907
8908 for (pass = 0; pass < elements; pass++) {
8909 if (size) {
8910 /* Double */
8911 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8912 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8913 TCGv_i64 tcg_res = tcg_temp_new_i64();
8914
8915 read_vec_element(s, tcg_op1, rn, pass, MO_64);
8916 read_vec_element(s, tcg_op2, rm, pass, MO_64);
8917
8918 switch (fpopcode) {
8919 case 0x39: /* FMLS */
8920 /* As usual for ARM, separate negation for fused multiply-add */
8921 gen_helper_vfp_negd(tcg_op1, tcg_op1);
8922 /* fall through */
8923 case 0x19: /* FMLA */
8924 read_vec_element(s, tcg_res, rd, pass, MO_64);
8925 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
8926 tcg_res, fpst);
8927 break;
8928 case 0x18: /* FMAXNM */
8929 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8930 break;
8931 case 0x1a: /* FADD */
8932 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
8933 break;
8934 case 0x1b: /* FMULX */
8935 gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
8936 break;
8937 case 0x1c: /* FCMEQ */
8938 gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8939 break;
8940 case 0x1e: /* FMAX */
8941 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
8942 break;
8943 case 0x1f: /* FRECPS */
8944 gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8945 break;
8946 case 0x38: /* FMINNM */
8947 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8948 break;
8949 case 0x3a: /* FSUB */
8950 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
8951 break;
8952 case 0x3e: /* FMIN */
8953 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
8954 break;
8955 case 0x3f: /* FRSQRTS */
8956 gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8957 break;
8958 case 0x5b: /* FMUL */
8959 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
8960 break;
8961 case 0x5c: /* FCMGE */
8962 gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8963 break;
8964 case 0x5d: /* FACGE */
8965 gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8966 break;
8967 case 0x5f: /* FDIV */
8968 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
8969 break;
8970 case 0x7a: /* FABD */
8971 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
8972 gen_helper_vfp_absd(tcg_res, tcg_res);
8973 break;
8974 case 0x7c: /* FCMGT */
8975 gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8976 break;
8977 case 0x7d: /* FACGT */
8978 gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8979 break;
8980 default:
8981 g_assert_not_reached();
8982 }
8983
8984 write_vec_element(s, tcg_res, rd, pass, MO_64);
8985
8986 tcg_temp_free_i64(tcg_res);
8987 tcg_temp_free_i64(tcg_op1);
8988 tcg_temp_free_i64(tcg_op2);
8989 } else {
8990 /* Single */
8991 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
8992 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
8993 TCGv_i32 tcg_res = tcg_temp_new_i32();
8994
8995 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
8996 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
8997
8998 switch (fpopcode) {
8999 case 0x39: /* FMLS */
9000 /* As usual for ARM, separate negation for fused multiply-add */
9001 gen_helper_vfp_negs(tcg_op1, tcg_op1);
9002 /* fall through */
9003 case 0x19: /* FMLA */
9004 read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9005 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
9006 tcg_res, fpst);
9007 break;
9008 case 0x1a: /* FADD */
9009 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
9010 break;
9011 case 0x1b: /* FMULX */
9012 gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
9013 break;
9014 case 0x1c: /* FCMEQ */
9015 gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9016 break;
9017 case 0x1e: /* FMAX */
9018 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
9019 break;
9020 case 0x1f: /* FRECPS */
9021 gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9022 break;
9023 case 0x18: /* FMAXNM */
9024 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
9025 break;
9026 case 0x38: /* FMINNM */
9027 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
9028 break;
9029 case 0x3a: /* FSUB */
9030 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
9031 break;
9032 case 0x3e: /* FMIN */
9033 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
9034 break;
9035 case 0x3f: /* FRSQRTS */
9036 gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9037 break;
9038 case 0x5b: /* FMUL */
9039 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
9040 break;
9041 case 0x5c: /* FCMGE */
9042 gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9043 break;
9044 case 0x5d: /* FACGE */
9045 gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9046 break;
9047 case 0x5f: /* FDIV */
9048 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
9049 break;
9050 case 0x7a: /* FABD */
9051 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
9052 gen_helper_vfp_abss(tcg_res, tcg_res);
9053 break;
9054 case 0x7c: /* FCMGT */
9055 gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9056 break;
9057 case 0x7d: /* FACGT */
9058 gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9059 break;
9060 default:
9061 g_assert_not_reached();
9062 }
9063
9064 if (elements == 1) {
9065 /* scalar single so clear high part */
9066 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
9067
9068 tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
9069 write_vec_element(s, tcg_tmp, rd, pass, MO_64);
9070 tcg_temp_free_i64(tcg_tmp);
9071 } else {
9072 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9073 }
9074
9075 tcg_temp_free_i32(tcg_res);
9076 tcg_temp_free_i32(tcg_op1);
9077 tcg_temp_free_i32(tcg_op2);
9078 }
9079 }
9080
9081 tcg_temp_free_ptr(fpst);
9082
9083 clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
9084 }
9085
9086 /* AdvSIMD scalar three same
9087 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
9088 * +-----+---+-----------+------+---+------+--------+---+------+------+
9089 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
9090 * +-----+---+-----------+------+---+------+--------+---+------+------+
9091 */
9092 static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
9093 {
9094 int rd = extract32(insn, 0, 5);
9095 int rn = extract32(insn, 5, 5);
9096 int opcode = extract32(insn, 11, 5);
9097 int rm = extract32(insn, 16, 5);
9098 int size = extract32(insn, 22, 2);
9099 bool u = extract32(insn, 29, 1);
9100 TCGv_i64 tcg_rd;
9101
9102 if (opcode >= 0x18) {
9103 /* Floating point: U, size[1] and opcode indicate operation */
9104 int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
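/* e.g. FABD: opcode 0x1a with size[1] == 1 and U == 1 gives
 * fpopcode 0x1a | 0x20 | 0x40 == 0x7a below.
 */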
9105 switch (fpopcode) {
9106 case 0x1b: /* FMULX */
9107 case 0x1f: /* FRECPS */
9108 case 0x3f: /* FRSQRTS */
9109 case 0x5d: /* FACGE */
9110 case 0x7d: /* FACGT */
9111 case 0x1c: /* FCMEQ */
9112 case 0x5c: /* FCMGE */
9113 case 0x7c: /* FCMGT */
9114 case 0x7a: /* FABD */
9115 break;
9116 default:
9117 unallocated_encoding(s);
9118 return;
9119 }
9120
9121 if (!fp_access_check(s)) {
9122 return;
9123 }
9124
9125 handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
9126 return;
9127 }
9128
9129 switch (opcode) {
9130 case 0x1: /* SQADD, UQADD */
9131 case 0x5: /* SQSUB, UQSUB */
9132 case 0x9: /* SQSHL, UQSHL */
9133 case 0xb: /* SQRSHL, UQRSHL */
9134 break;
9135 case 0x8: /* SSHL, USHL */
9136 case 0xa: /* SRSHL, URSHL */
9137 case 0x6: /* CMGT, CMHI */
9138 case 0x7: /* CMGE, CMHS */
9139 case 0x11: /* CMTST, CMEQ */
9140 case 0x10: /* ADD, SUB (vector) */
9141 if (size != 3) {
9142 unallocated_encoding(s);
9143 return;
9144 }
9145 break;
9146 case 0x16: /* SQDMULH, SQRDMULH (vector) */
9147 if (size != 1 && size != 2) {
9148 unallocated_encoding(s);
9149 return;
9150 }
9151 break;
9152 default:
9153 unallocated_encoding(s);
9154 return;
9155 }
9156
9157 if (!fp_access_check(s)) {
9158 return;
9159 }
9160
9161 tcg_rd = tcg_temp_new_i64();
9162
9163 if (size == 3) {
9164 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
9165 TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
9166
9167 handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
9168 tcg_temp_free_i64(tcg_rn);
9169 tcg_temp_free_i64(tcg_rm);
9170 } else {
9171 /* Do a single operation on the lowest element in the vector.
9172 * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
9173 * no side effects for all these operations.
9174 * OPTME: special-purpose helpers would avoid doing some
9175 * unnecessary work in the helper for the 8 and 16 bit cases.
9176 */
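/* This is safe because read_vec_element_i32 with MO_8/MO_16
 * (no MO_SIGN) zero-extends into the 32-bit temp, so the lanes
 * the helper processes beyond the low element are all zero.
 */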
9177 NeonGenTwoOpEnvFn *genenvfn;
9178 TCGv_i32 tcg_rn = tcg_temp_new_i32();
9179 TCGv_i32 tcg_rm = tcg_temp_new_i32();
9180 TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
9181
9182 read_vec_element_i32(s, tcg_rn, rn, 0, size);
9183 read_vec_element_i32(s, tcg_rm, rm, 0, size);
9184
9185 switch (opcode) {
9186 case 0x1: /* SQADD, UQADD */
9187 {
9188 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9189 { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
9190 { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
9191 { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
9192 };
9193 genenvfn = fns[size][u];
9194 break;
9195 }
9196 case 0x5: /* SQSUB, UQSUB */
9197 {
9198 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9199 { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
9200 { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
9201 { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
9202 };
9203 genenvfn = fns[size][u];
9204 break;
9205 }
9206 case 0x9: /* SQSHL, UQSHL */
9207 {
9208 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9209 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
9210 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
9211 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
9212 };
9213 genenvfn = fns[size][u];
9214 break;
9215 }
9216 case 0xb: /* SQRSHL, UQRSHL */
9217 {
9218 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9219 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
9220 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
9221 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
9222 };
9223 genenvfn = fns[size][u];
9224 break;
9225 }
9226 case 0x16: /* SQDMULH, SQRDMULH */
9227 {
9228 static NeonGenTwoOpEnvFn * const fns[2][2] = {
9229 { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
9230 { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
9231 };
9232 assert(size == 1 || size == 2);
9233 genenvfn = fns[size - 1][u];
9234 break;
9235 }
9236 default:
9237 g_assert_not_reached();
9238 }
9239
9240 genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
9241 tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
9242 tcg_temp_free_i32(tcg_rd32);
9243 tcg_temp_free_i32(tcg_rn);
9244 tcg_temp_free_i32(tcg_rm);
9245 }
9246
9247 write_fp_dreg(s, rd, tcg_rd);
9248
9249 tcg_temp_free_i64(tcg_rd);
9250 }
9251
9252 /* AdvSIMD scalar three same FP16
9253 * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0
9254 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
9255 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd |
9256 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
9257 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
9258 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
9259 */
9260 static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
9261 uint32_t insn)
9262 {
9263 int rd = extract32(insn, 0, 5);
9264 int rn = extract32(insn, 5, 5);
9265 int opcode = extract32(insn, 11, 3);
9266 int rm = extract32(insn, 16, 5);
9267 bool u = extract32(insn, 29, 1);
9268 bool a = extract32(insn, 23, 1);
9269 int fpopcode = opcode | (a << 3) | (u << 4);
9270 TCGv_ptr fpst;
9271 TCGv_i32 tcg_op1;
9272 TCGv_i32 tcg_op2;
9273 TCGv_i32 tcg_res;
9274
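/* e.g. FABD (half-precision): opcode 0b010 with a == 1 and
 * u == 1 gives fpopcode 0x1a below.
 */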
9275 switch (fpopcode) {
9276 case 0x03: /* FMULX */
9277 case 0x04: /* FCMEQ (reg) */
9278 case 0x07: /* FRECPS */
9279 case 0x0f: /* FRSQRTS */
9280 case 0x14: /* FCMGE (reg) */
9281 case 0x15: /* FACGE */
9282 case 0x1a: /* FABD */
9283 case 0x1c: /* FCMGT (reg) */
9284 case 0x1d: /* FACGT */
9285 break;
9286 default:
9287 unallocated_encoding(s);
9288 return;
9289 }
9290
9291 if (!dc_isar_feature(aa64_fp16, s)) {
9292 unallocated_encoding(s);
return;
9293 }
9294
9295 if (!fp_access_check(s)) {
9296 return;
9297 }
9298
9299 fpst = get_fpstatus_ptr(true);
9300
9301 tcg_op1 = read_fp_hreg(s, rn);
9302 tcg_op2 = read_fp_hreg(s, rm);
9303 tcg_res = tcg_temp_new_i32();
9304
9305 switch (fpopcode) {
9306 case 0x03: /* FMULX */
9307 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
9308 break;
9309 case 0x04: /* FCMEQ (reg) */
9310 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9311 break;
9312 case 0x07: /* FRECPS */
9313 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9314 break;
9315 case 0x0f: /* FRSQRTS */
9316 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9317 break;
9318 case 0x14: /* FCMGE (reg) */
9319 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9320 break;
9321 case 0x15: /* FACGE */
9322 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9323 break;
9324 case 0x1a: /* FABD */
9325 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
9326 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
9327 break;
9328 case 0x1c: /* FCMGT (reg) */
9329 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9330 break;
9331 case 0x1d: /* FACGT */
9332 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9333 break;
9334 default:
9335 g_assert_not_reached();
9336 }
9337
9338 write_fp_sreg(s, rd, tcg_res);
9339
9341 tcg_temp_free_i32(tcg_res);
9342 tcg_temp_free_i32(tcg_op1);
9343 tcg_temp_free_i32(tcg_op2);
9344 tcg_temp_free_ptr(fpst);
9345 }
9346
9347 /* AdvSIMD scalar three same extra
9348 * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
9349 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9350 * | 0 1 | U | 1 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd |
9351 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9352 */
9353 static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
9354 uint32_t insn)
9355 {
9356 int rd = extract32(insn, 0, 5);
9357 int rn = extract32(insn, 5, 5);
9358 int opcode = extract32(insn, 11, 4);
9359 int rm = extract32(insn, 16, 5);
9360 int size = extract32(insn, 22, 2);
9361 bool u = extract32(insn, 29, 1);
9362 TCGv_i32 ele1, ele2, ele3;
9363 TCGv_i64 res;
9364 bool feature;
9365
9366 switch (u * 16 + opcode) {
9367 case 0x10: /* SQRDMLAH (vector) */
9368 case 0x11: /* SQRDMLSH (vector) */
9369 if (size != 1 && size != 2) {
9370 unallocated_encoding(s);
9371 return;
9372 }
9373 feature = dc_isar_feature(aa64_rdm, s);
9374 break;
9375 default:
9376 unallocated_encoding(s);
9377 return;
9378 }
9379 if (!feature) {
9380 unallocated_encoding(s);
9381 return;
9382 }
9383 if (!fp_access_check(s)) {
9384 return;
9385 }
9386
9387 /* Do a single operation on the lowest element in the vector.
9388 * We use the standard Neon helpers and rely on 0 OP 0 == 0
9389 * with no side effects for all these operations.
9390 * OPTME: special-purpose helpers would avoid doing some
9391 * unnecessary work in the helper for the 16 bit cases.
9392 */
9393 ele1 = tcg_temp_new_i32();
9394 ele2 = tcg_temp_new_i32();
9395 ele3 = tcg_temp_new_i32();
9396
9397 read_vec_element_i32(s, ele1, rn, 0, size);
9398 read_vec_element_i32(s, ele2, rm, 0, size);
9399 read_vec_element_i32(s, ele3, rd, 0, size);
9400
9401 switch (opcode) {
9402 case 0x0: /* SQRDMLAH */
9403 if (size == 1) {
9404 gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
9405 } else {
9406 gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
9407 }
9408 break;
9409 case 0x1: /* SQRDMLSH */
9410 if (size == 1) {
9411 gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
9412 } else {
9413 gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
9414 }
9415 break;
9416 default:
9417 g_assert_not_reached();
9418 }
9419 tcg_temp_free_i32(ele1);
9420 tcg_temp_free_i32(ele2);
9421
9422 res = tcg_temp_new_i64();
9423 tcg_gen_extu_i32_i64(res, ele3);
9424 tcg_temp_free_i32(ele3);
9425
9426 write_fp_dreg(s, rd, res);
9427 tcg_temp_free_i64(res);
9428 }
9429
9430 static void handle_2misc_64(DisasContext *s, int opcode, bool u,
9431 TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
9432 TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
9433 {
9434 /* Handle 64->64 opcodes which are shared between the scalar and
9435 * vector 2-reg-misc groups. We cover every integer opcode where size == 3
9436 * is valid in either group and also the double-precision fp ops.
9437 * The caller need only provide tcg_rmode and tcg_fpstatus if the op
9438 * requires them.
9439 */
9440 TCGCond cond;
9441
9442 switch (opcode) {
9443 case 0x4: /* CLS, CLZ */
9444 if (u) {
9445 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
9446 } else {
9447 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
9448 }
9449 break;
9450 case 0x5: /* NOT */
9451 /* This opcode is shared with CNT and RBIT but we have earlier
9452 * enforced that size == 3 if and only if this is the NOT insn.
9453 */
9454 tcg_gen_not_i64(tcg_rd, tcg_rn);
9455 break;
9456 case 0x7: /* SQABS, SQNEG */
9457 if (u) {
9458 gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
9459 } else {
9460 gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
9461 }
9462 break;
9463 case 0xa: /* CMLT */
9464 /* 64 bit integer comparison against zero, result is
9465 * test ? (2^64 - 1) : 0. We implement this using setcond (test)
9466 * and then negating.
9467 */
9468 cond = TCG_COND_LT;
9469 do_cmop:
9470 tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
9471 tcg_gen_neg_i64(tcg_rd, tcg_rd);
9472 break;
9473 case 0x8: /* CMGT, CMGE */
9474 cond = u ? TCG_COND_GE : TCG_COND_GT;
9475 goto do_cmop;
9476 case 0x9: /* CMEQ, CMLE */
9477 cond = u ? TCG_COND_LE : TCG_COND_EQ;
9478 goto do_cmop;
9479 case 0xb: /* ABS, NEG */
9480 if (u) {
9481 tcg_gen_neg_i64(tcg_rd, tcg_rn);
9482 } else {
9483 tcg_gen_abs_i64(tcg_rd, tcg_rn);
9484 }
9485 break;
9486 case 0x2f: /* FABS */
9487 gen_helper_vfp_absd(tcg_rd, tcg_rn);
9488 break;
9489 case 0x6f: /* FNEG */
9490 gen_helper_vfp_negd(tcg_rd, tcg_rn);
9491 break;
9492 case 0x7f: /* FSQRT */
9493 gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
9494 break;
9495 case 0x1a: /* FCVTNS */
9496 case 0x1b: /* FCVTMS */
9497 case 0x1c: /* FCVTAS */
9498 case 0x3a: /* FCVTPS */
9499 case 0x3b: /* FCVTZS */
9500 {
9501 TCGv_i32 tcg_shift = tcg_const_i32(0);
9502 gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
9503 tcg_temp_free_i32(tcg_shift);
9504 break;
9505 }
9506 case 0x5a: /* FCVTNU */
9507 case 0x5b: /* FCVTMU */
9508 case 0x5c: /* FCVTAU */
9509 case 0x7a: /* FCVTPU */
9510 case 0x7b: /* FCVTZU */
9511 {
9512 TCGv_i32 tcg_shift = tcg_const_i32(0);
9513 gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
9514 tcg_temp_free_i32(tcg_shift);
9515 break;
9516 }
9517 case 0x18: /* FRINTN */
9518 case 0x19: /* FRINTM */
9519 case 0x38: /* FRINTP */
9520 case 0x39: /* FRINTZ */
9521 case 0x58: /* FRINTA */
9522 case 0x79: /* FRINTI */
9523 gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
9524 break;
9525 case 0x59: /* FRINTX */
9526 gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
9527 break;
9528 case 0x1e: /* FRINT32Z */
9529 case 0x5e: /* FRINT32X */
9530 gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
9531 break;
9532 case 0x1f: /* FRINT64Z */
9533 case 0x5f: /* FRINT64X */
9534 gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
9535 break;
9536 default:
9537 g_assert_not_reached();
9538 }
9539 }
9540
9541 static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
9542 bool is_scalar, bool is_u, bool is_q,
9543 int size, int rn, int rd)
9544 {
9545 bool is_double = (size == MO_64);
9546 TCGv_ptr fpst;
9547
9548 if (!fp_access_check(s)) {
9549 return;
9550 }
9551
9552 fpst = get_fpstatus_ptr(size == MO_16);
9553
9554 if (is_double) {
9555 TCGv_i64 tcg_op = tcg_temp_new_i64();
9556 TCGv_i64 tcg_zero = tcg_const_i64(0);
9557 TCGv_i64 tcg_res = tcg_temp_new_i64();
9558 NeonGenTwoDoubleOpFn *genfn;
9559 bool swap = false;
9560 int pass;
9561
9562 switch (opcode) {
9563 case 0x2e: /* FCMLT (zero) */
9564 swap = true;
9565 /* fallthrough */
9566 case 0x2c: /* FCMGT (zero) */
9567 genfn = gen_helper_neon_cgt_f64;
9568 break;
9569 case 0x2d: /* FCMEQ (zero) */
9570 genfn = gen_helper_neon_ceq_f64;
9571 break;
9572 case 0x6d: /* FCMLE (zero) */
9573 swap = true;
9574 /* fall through */
9575 case 0x6c: /* FCMGE (zero) */
9576 genfn = gen_helper_neon_cge_f64;
9577 break;
9578 default:
9579 g_assert_not_reached();
9580 }
9581
9582 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9583 read_vec_element(s, tcg_op, rn, pass, MO_64);
9584 if (swap) {
9585 genfn(tcg_res, tcg_zero, tcg_op, fpst);
9586 } else {
9587 genfn(tcg_res, tcg_op, tcg_zero, fpst);
9588 }
9589 write_vec_element(s, tcg_res, rd, pass, MO_64);
9590 }
9591 tcg_temp_free_i64(tcg_res);
9592 tcg_temp_free_i64(tcg_zero);
9593 tcg_temp_free_i64(tcg_op);
9594
9595 clear_vec_high(s, !is_scalar, rd);
9596 } else {
9597 TCGv_i32 tcg_op = tcg_temp_new_i32();
9598 TCGv_i32 tcg_zero = tcg_const_i32(0);
9599 TCGv_i32 tcg_res = tcg_temp_new_i32();
9600 NeonGenTwoSingleOpFn *genfn;
9601 bool swap = false;
9602 int pass, maxpasses;
9603
9604 if (size == MO_16) {
9605 switch (opcode) {
9606 case 0x2e: /* FCMLT (zero) */
9607 swap = true;
9608 /* fall through */
9609 case 0x2c: /* FCMGT (zero) */
9610 genfn = gen_helper_advsimd_cgt_f16;
9611 break;
9612 case 0x2d: /* FCMEQ (zero) */
9613 genfn = gen_helper_advsimd_ceq_f16;
9614 break;
9615 case 0x6d: /* FCMLE (zero) */
9616 swap = true;
9617 /* fall through */
9618 case 0x6c: /* FCMGE (zero) */
9619 genfn = gen_helper_advsimd_cge_f16;
9620 break;
9621 default:
9622 g_assert_not_reached();
9623 }
9624 } else {
9625 switch (opcode) {
9626 case 0x2e: /* FCMLT (zero) */
9627 swap = true;
9628 /* fall through */
9629 case 0x2c: /* FCMGT (zero) */
9630 genfn = gen_helper_neon_cgt_f32;
9631 break;
9632 case 0x2d: /* FCMEQ (zero) */
9633 genfn = gen_helper_neon_ceq_f32;
9634 break;
9635 case 0x6d: /* FCMLE (zero) */
9636 swap = true;
9637 /* fall through */
9638 case 0x6c: /* FCMGE (zero) */
9639 genfn = gen_helper_neon_cge_f32;
9640 break;
9641 default:
9642 g_assert_not_reached();
9643 }
9644 }
9645
9646 if (is_scalar) {
9647 maxpasses = 1;
9648 } else {
9649 int vector_size = 8 << is_q;
9650 maxpasses = vector_size >> size;
9651 }
9652
9653 for (pass = 0; pass < maxpasses; pass++) {
9654 read_vec_element_i32(s, tcg_op, rn, pass, size);
9655 if (swap) {
9656 genfn(tcg_res, tcg_zero, tcg_op, fpst);
9657 } else {
9658 genfn(tcg_res, tcg_op, tcg_zero, fpst);
9659 }
9660 if (is_scalar) {
9661 write_fp_sreg(s, rd, tcg_res);
9662 } else {
9663 write_vec_element_i32(s, tcg_res, rd, pass, size);
9664 }
9665 }
9666 tcg_temp_free_i32(tcg_res);
9667 tcg_temp_free_i32(tcg_zero);
9668 tcg_temp_free_i32(tcg_op);
9669 if (!is_scalar) {
9670 clear_vec_high(s, is_q, rd);
9671 }
9672 }
9673
9674 tcg_temp_free_ptr(fpst);
9675 }
9676
9677 static void handle_2misc_reciprocal(DisasContext *s, int opcode,
9678 bool is_scalar, bool is_u, bool is_q,
9679 int size, int rn, int rd)
9680 {
9681 bool is_double = (size == 3);
9682 TCGv_ptr fpst = get_fpstatus_ptr(false);
9683
9684 if (is_double) {
9685 TCGv_i64 tcg_op = tcg_temp_new_i64();
9686 TCGv_i64 tcg_res = tcg_temp_new_i64();
9687 int pass;
9688
9689 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9690 read_vec_element(s, tcg_op, rn, pass, MO_64);
9691 switch (opcode) {
9692 case 0x3d: /* FRECPE */
9693 gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
9694 break;
9695 case 0x3f: /* FRECPX */
9696 gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
9697 break;
9698 case 0x7d: /* FRSQRTE */
9699 gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
9700 break;
9701 default:
9702 g_assert_not_reached();
9703 }
9704 write_vec_element(s, tcg_res, rd, pass, MO_64);
9705 }
9706 tcg_temp_free_i64(tcg_res);
9707 tcg_temp_free_i64(tcg_op);
9708 clear_vec_high(s, !is_scalar, rd);
9709 } else {
9710 TCGv_i32 tcg_op = tcg_temp_new_i32();
9711 TCGv_i32 tcg_res = tcg_temp_new_i32();
9712 int pass, maxpasses;
9713
9714 if (is_scalar) {
9715 maxpasses = 1;
9716 } else {
9717 maxpasses = is_q ? 4 : 2;
9718 }
9719
9720 for (pass = 0; pass < maxpasses; pass++) {
9721 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
9722
9723 switch (opcode) {
9724 case 0x3c: /* URECPE */
9725 gen_helper_recpe_u32(tcg_res, tcg_op);
9726 break;
9727 case 0x3d: /* FRECPE */
9728 gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
9729 break;
9730 case 0x3f: /* FRECPX */
9731 gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
9732 break;
9733 case 0x7d: /* FRSQRTE */
9734 gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
9735 break;
9736 default:
9737 g_assert_not_reached();
9738 }
9739
9740 if (is_scalar) {
9741 write_fp_sreg(s, rd, tcg_res);
9742 } else {
9743 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9744 }
9745 }
9746 tcg_temp_free_i32(tcg_res);
9747 tcg_temp_free_i32(tcg_op);
9748 if (!is_scalar) {
9749 clear_vec_high(s, is_q, rd);
9750 }
9751 }
9752 tcg_temp_free_ptr(fpst);
9753 }
9754
9755 static void handle_2misc_narrow(DisasContext *s, bool scalar,
9756 int opcode, bool u, bool is_q,
9757 int size, int rn, int rd)
9758 {
9759 /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
9760 * in the source becomes a size element in the destination).
9761 */
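/* e.g. XTN on a 2D source produces two 32-bit results that land
 * in the low 64 bits of Rd (destelt == 0); the "2" variants
 * (is_q) write the same two results into the high half instead.
 */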
9762 int pass;
9763 TCGv_i32 tcg_res[2];
9764 int destelt = is_q ? 2 : 0;
9765 int passes = scalar ? 1 : 2;
9766
9767 if (scalar) {
9768 tcg_res[1] = tcg_const_i32(0);
9769 }
9770
9771 for (pass = 0; pass < passes; pass++) {
9772 TCGv_i64 tcg_op = tcg_temp_new_i64();
9773 NeonGenNarrowFn *genfn = NULL;
9774 NeonGenNarrowEnvFn *genenvfn = NULL;
9775
9776 if (scalar) {
9777 read_vec_element(s, tcg_op, rn, pass, size + 1);
9778 } else {
9779 read_vec_element(s, tcg_op, rn, pass, MO_64);
9780 }
9781 tcg_res[pass] = tcg_temp_new_i32();
9782
9783 switch (opcode) {
9784 case 0x12: /* XTN, SQXTUN */
9785 {
9786 static NeonGenNarrowFn * const xtnfns[3] = {
9787 gen_helper_neon_narrow_u8,
9788 gen_helper_neon_narrow_u16,
9789 tcg_gen_extrl_i64_i32,
9790 };
9791 static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
9792 gen_helper_neon_unarrow_sat8,
9793 gen_helper_neon_unarrow_sat16,
9794 gen_helper_neon_unarrow_sat32,
9795 };
9796 if (u) {
9797 genenvfn = sqxtunfns[size];
9798 } else {
9799 genfn = xtnfns[size];
9800 }
9801 break;
9802 }
9803 case 0x14: /* SQXTN, UQXTN */
9804 {
9805 static NeonGenNarrowEnvFn * const fns[3][2] = {
9806 { gen_helper_neon_narrow_sat_s8,
9807 gen_helper_neon_narrow_sat_u8 },
9808 { gen_helper_neon_narrow_sat_s16,
9809 gen_helper_neon_narrow_sat_u16 },
9810 { gen_helper_neon_narrow_sat_s32,
9811 gen_helper_neon_narrow_sat_u32 },
9812 };
9813 genenvfn = fns[size][u];
9814 break;
9815 }
9816 case 0x16: /* FCVTN, FCVTN2 */
9817 /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
9818 if (size == 2) {
9819 gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
9820 } else {
9821 TCGv_i32 tcg_lo = tcg_temp_new_i32();
9822 TCGv_i32 tcg_hi = tcg_temp_new_i32();
9823 TCGv_ptr fpst = get_fpstatus_ptr(false);
9824 TCGv_i32 ahp = get_ahp_flag();
9825
9826 tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
9827 gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
9828 gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
9829 tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
9830 tcg_temp_free_i32(tcg_lo);
9831 tcg_temp_free_i32(tcg_hi);
9832 tcg_temp_free_ptr(fpst);
9833 tcg_temp_free_i32(ahp);
9834 }
9835 break;
9836 case 0x56: /* FCVTXN, FCVTXN2 */
9837 /* 64 bit to 32 bit float conversion
9838 * with von Neumann rounding (round to odd)
9839 */
9840 assert(size == 2);
9841 gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
9842 break;
9843 default:
9844 g_assert_not_reached();
9845 }
9846
9847 if (genfn) {
9848 genfn(tcg_res[pass], tcg_op);
9849 } else if (genenvfn) {
9850 genenvfn(tcg_res[pass], cpu_env, tcg_op);
9851 }
9852
9853 tcg_temp_free_i64(tcg_op);
9854 }
9855
9856 for (pass = 0; pass < 2; pass++) {
9857 write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
9858 tcg_temp_free_i32(tcg_res[pass]);
9859 }
9860 clear_vec_high(s, is_q, rd);
9861 }
9862
9863 /* Remaining saturating accumulating ops */
9864 static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
9865 bool is_q, int size, int rn, int rd)
9866 {
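/*
 * Operation summary (per the ARM ARM): SUQADD adds the unsigned Rn
 * to the signed Rd and saturates to the signed range; USQADD adds
 * the signed Rn to the unsigned Rd and saturates to the unsigned
 * range. The helper name suffix (_u/_s) denotes the addend's type.
 */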
9867 bool is_double = (size == 3);
9868
9869 if (is_double) {
9870 TCGv_i64 tcg_rn = tcg_temp_new_i64();
9871 TCGv_i64 tcg_rd = tcg_temp_new_i64();
9872 int pass;
9873
9874 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9875 read_vec_element(s, tcg_rn, rn, pass, MO_64);
9876 read_vec_element(s, tcg_rd, rd, pass, MO_64);
9877
9878 if (is_u) { /* USQADD */
9879 gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9880 } else { /* SUQADD */
9881 gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9882 }
9883 write_vec_element(s, tcg_rd, rd, pass, MO_64);
9884 }
9885 tcg_temp_free_i64(tcg_rd);
9886 tcg_temp_free_i64(tcg_rn);
9887 clear_vec_high(s, !is_scalar, rd);
9888 } else {
9889 TCGv_i32 tcg_rn = tcg_temp_new_i32();
9890 TCGv_i32 tcg_rd = tcg_temp_new_i32();
9891 int pass, maxpasses;
9892
9893 if (is_scalar) {
9894 maxpasses = 1;
9895 } else {
9896 maxpasses = is_q ? 4 : 2;
9897 }
9898
9899 for (pass = 0; pass < maxpasses; pass++) {
9900 if (is_scalar) {
9901 read_vec_element_i32(s, tcg_rn, rn, pass, size);
9902 read_vec_element_i32(s, tcg_rd, rd, pass, size);
9903 } else {
9904 read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
9905 read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
9906 }
9907
9908 if (is_u) { /* USQADD */
9909 switch (size) {
9910 case 0:
9911 gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9912 break;
9913 case 1:
9914 gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9915 break;
9916 case 2:
9917 gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9918 break;
9919 default:
9920 g_assert_not_reached();
9921 }
9922 } else { /* SUQADD */
9923 switch (size) {
9924 case 0:
9925 gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9926 break;
9927 case 1:
9928 gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9929 break;
9930 case 2:
9931 gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9932 break;
9933 default:
9934 g_assert_not_reached();
9935 }
9936 }
9937
9938 if (is_scalar) {
9939 TCGv_i64 tcg_zero = tcg_const_i64(0);
9940 write_vec_element(s, tcg_zero, rd, 0, MO_64);
9941 tcg_temp_free_i64(tcg_zero);
9942 }
9943 write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
9944 }
9945 tcg_temp_free_i32(tcg_rd);
9946 tcg_temp_free_i32(tcg_rn);
9947 clear_vec_high(s, is_q, rd);
9948 }
9949 }
9950
9951 /* AdvSIMD scalar two reg misc
9952 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
9953 * +-----+---+-----------+------+-----------+--------+-----+------+------+
9954 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
9955 * +-----+---+-----------+------+-----------+--------+-----+------+------+
9956 */
9957 static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
9958 {
9959 int rd = extract32(insn, 0, 5);
9960 int rn = extract32(insn, 5, 5);
9961 int opcode = extract32(insn, 12, 5);
9962 int size = extract32(insn, 22, 2);
9963 bool u = extract32(insn, 29, 1);
9964 bool is_fcvt = false;
9965 int rmode;
9966 TCGv_i32 tcg_rmode;
9967 TCGv_ptr tcg_fpstatus;
9968
9969 switch (opcode) {
9970 case 0x3: /* USQADD / SUQADD */
9971 if (!fp_access_check(s)) {
9972 return;
9973 }
9974 handle_2misc_satacc(s, true, u, false, size, rn, rd);
9975 return;
9976 case 0x7: /* SQABS / SQNEG */
9977 break;
9978 case 0xa: /* CMLT */
9979 if (u) {
9980 unallocated_encoding(s);
9981 return;
9982 }
9983 /* fall through */
9984 case 0x8: /* CMGT, CMGE */
9985 case 0x9: /* CMEQ, CMLE */
9986 case 0xb: /* ABS, NEG */
9987 if (size != 3) {
9988 unallocated_encoding(s);
9989 return;
9990 }
9991 break;
9992 case 0x12: /* SQXTUN */
9993 if (!u) {
9994 unallocated_encoding(s);
9995 return;
9996 }
9997 /* fall through */
9998 case 0x14: /* SQXTN, UQXTN */
9999 if (size == 3) {
10000 unallocated_encoding(s);
10001 return;
10002 }
10003 if (!fp_access_check(s)) {
10004 return;
10005 }
10006 handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
10007 return;
10008 case 0xc ... 0xf:
10009 case 0x16 ... 0x1d:
10010 case 0x1f:
10011 /* Floating point: U, size[1] and opcode indicate operation;
10012 * size[0] indicates single or double precision.
10013 */
10014 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
10015 size = extract32(size, 0, 1) ? 3 : 2;
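/*
 * Example (illustrative): FCVTZS has U == 0, size<1> == 1 and
 * opcode == 0x1b, so the remapped opcode is 0x3b; size<0> selects
 * single (size 2) or double (size 3) precision.
 */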
10016 switch (opcode) {
10017 case 0x2c: /* FCMGT (zero) */
10018 case 0x2d: /* FCMEQ (zero) */
10019 case 0x2e: /* FCMLT (zero) */
10020 case 0x6c: /* FCMGE (zero) */
10021 case 0x6d: /* FCMLE (zero) */
10022 handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
10023 return;
10024 case 0x1d: /* SCVTF */
10025 case 0x5d: /* UCVTF */
10026 {
10027 bool is_signed = (opcode == 0x1d);
10028 if (!fp_access_check(s)) {
10029 return;
10030 }
10031 handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
10032 return;
10033 }
10034 case 0x3d: /* FRECPE */
10035 case 0x3f: /* FRECPX */
10036 case 0x7d: /* FRSQRTE */
10037 if (!fp_access_check(s)) {
10038 return;
10039 }
10040 handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
10041 return;
10042 case 0x1a: /* FCVTNS */
10043 case 0x1b: /* FCVTMS */
10044 case 0x3a: /* FCVTPS */
10045 case 0x3b: /* FCVTZS */
10046 case 0x5a: /* FCVTNU */
10047 case 0x5b: /* FCVTMU */
10048 case 0x7a: /* FCVTPU */
10049 case 0x7b: /* FCVTZU */
10050 is_fcvt = true;
10051 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
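/*
 * Example (illustrative): FCVTZS (0x3b) yields rmode == 3 ==
 * FPROUNDING_ZERO; FCVTPS (0x3a) yields 1 == FPROUNDING_POSINF.
 */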
10052 break;
10053 case 0x1c: /* FCVTAS */
10054 case 0x5c: /* FCVTAU */
10055 /* TIEAWAY doesn't fit in the usual rounding mode encoding */
10056 is_fcvt = true;
10057 rmode = FPROUNDING_TIEAWAY;
10058 break;
10059 case 0x56: /* FCVTXN, FCVTXN2 */
10060 if (size == 2) {
10061 unallocated_encoding(s);
10062 return;
10063 }
10064 if (!fp_access_check(s)) {
10065 return;
10066 }
10067 handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
10068 return;
10069 default:
10070 unallocated_encoding(s);
10071 return;
10072 }
10073 break;
10074 default:
10075 unallocated_encoding(s);
10076 return;
10077 }
10078
10079 if (!fp_access_check(s)) {
10080 return;
10081 }
10082
10083 if (is_fcvt) {
10084 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
10085 tcg_fpstatus = get_fpstatus_ptr(false);
10086 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
10087 } else {
10088 tcg_rmode = NULL;
10089 tcg_fpstatus = NULL;
10090 }
10091
10092 if (size == 3) {
10093 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
10094 TCGv_i64 tcg_rd = tcg_temp_new_i64();
10095
10096 handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
10097 write_fp_dreg(s, rd, tcg_rd);
10098 tcg_temp_free_i64(tcg_rd);
10099 tcg_temp_free_i64(tcg_rn);
10100 } else {
10101 TCGv_i32 tcg_rn = tcg_temp_new_i32();
10102 TCGv_i32 tcg_rd = tcg_temp_new_i32();
10103
10104 read_vec_element_i32(s, tcg_rn, rn, 0, size);
10105
10106 switch (opcode) {
10107 case 0x7: /* SQABS, SQNEG */
10108 {
10109 NeonGenOneOpEnvFn *genfn;
10110 static NeonGenOneOpEnvFn * const fns[3][2] = {
10111 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
10112 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
10113 { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
10114 };
10115 genfn = fns[size][u];
10116 genfn(tcg_rd, cpu_env, tcg_rn);
10117 break;
10118 }
10119 case 0x1a: /* FCVTNS */
10120 case 0x1b: /* FCVTMS */
10121 case 0x1c: /* FCVTAS */
10122 case 0x3a: /* FCVTPS */
10123 case 0x3b: /* FCVTZS */
10124 {
10125 TCGv_i32 tcg_shift = tcg_const_i32(0);
10126 gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
10127 tcg_temp_free_i32(tcg_shift);
10128 break;
10129 }
10130 case 0x5a: /* FCVTNU */
10131 case 0x5b: /* FCVTMU */
10132 case 0x5c: /* FCVTAU */
10133 case 0x7a: /* FCVTPU */
10134 case 0x7b: /* FCVTZU */
10135 {
10136 TCGv_i32 tcg_shift = tcg_const_i32(0);
10137 gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
10138 tcg_temp_free_i32(tcg_shift);
10139 break;
10140 }
10141 default:
10142 g_assert_not_reached();
10143 }
10144
10145 write_fp_sreg(s, rd, tcg_rd);
10146 tcg_temp_free_i32(tcg_rd);
10147 tcg_temp_free_i32(tcg_rn);
10148 }
10149
10150 if (is_fcvt) {
10151 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
10152 tcg_temp_free_i32(tcg_rmode);
10153 tcg_temp_free_ptr(tcg_fpstatus);
10154 }
10155 }
10156
10157 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
10158 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
10159 int immh, int immb, int opcode, int rn, int rd)
10160 {
10161 int size = 32 - clz32(immh) - 1;
10162 int immhb = immh << 3 | immb;
10163 int shift = 2 * (8 << size) - immhb;
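/*
 * Worked example (illustrative): immh == 0b0001, immb == 0b100 gives
 * immhb == 12 and size == 0 (byte elements), so shift == 16 - 12 == 4;
 * immh == 0b1000, immb == 0 gives size == 3 and the boundary case
 * shift == 64, which the SSHR/USHR path below treats specially.
 */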
10164 GVecGen2iFn *gvec_fn;
10165
10166 if (extract32(immh, 3, 1) && !is_q) {
10167 unallocated_encoding(s);
10168 return;
10169 }
10170 tcg_debug_assert(size <= 3);
10171
10172 if (!fp_access_check(s)) {
10173 return;
10174 }
10175
10176 switch (opcode) {
10177 case 0x02: /* SSRA / USRA (accumulate) */
10178 gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
10179 break;
10180
10181 case 0x08: /* SRI */
10182 gvec_fn = gen_gvec_sri;
10183 break;
10184
10185 case 0x00: /* SSHR / USHR */
10186 if (is_u) {
10187 if (shift == 8 << size) {
10188 /* Shift count the same size as element size produces zero. */
10189 tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
10190 is_q ? 16 : 8, vec_full_reg_size(s), 0);
10191 return;
10192 }
10193 gvec_fn = tcg_gen_gvec_shri;
10194 } else {
10195 /* Shift count the same size as element size produces all sign. */
10196 if (shift == 8 << size) {
10197 shift -= 1;
10198 }
10199 gvec_fn = tcg_gen_gvec_sari;
10200 }
10201 break;
10202
10203 case 0x04: /* SRSHR / URSHR (rounding) */
10204 gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
10205 break;
10206
10207 case 0x06: /* SRSRA / URSRA (accum + rounding) */
10208 gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
10209 break;
10210
10211 default:
10212 g_assert_not_reached();
10213 }
10214
10215 gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
10216 }
10217
10218 /* SHL/SLI - Vector shift left */
10219 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
10220 int immh, int immb, int opcode, int rn, int rd)
10221 {
10222 int size = 32 - clz32(immh) - 1;
10223 int immhb = immh << 3 | immb;
10224 int shift = immhb - (8 << size);
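/*
 * Worked example (illustrative): immh == 0b0001, immb == 0b011 gives
 * immhb == 11 and byte elements, so shift == 11 - 8 == 3; left shift
 * amounts therefore range from 0 to esize - 1.
 */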
10225
10226 /* Range of size is limited by decode: immh is a non-zero 4 bit field */
10227 assert(size >= 0 && size <= 3);
10228
10229 if (extract32(immh, 3, 1) && !is_q) {
10230 unallocated_encoding(s);
10231 return;
10232 }
10233
10234 if (!fp_access_check(s)) {
10235 return;
10236 }
10237
10238 if (insert) {
10239 gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
10240 } else {
10241 gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
10242 }
10243 }
10244
10245 /* USHLL/SHLL - Vector shift left with widening */
10246 static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
10247 int immh, int immb, int opcode, int rn, int rd)
10248 {
10249 int size = 32 - clz32(immh) - 1;
10250 int immhb = immh << 3 | immb;
10251 int shift = immhb - (8 << size);
10252 int dsize = 64;
10253 int esize = 8 << size;
10254 int elements = dsize/esize;
10255 TCGv_i64 tcg_rn = new_tmp_a64(s);
10256 TCGv_i64 tcg_rd = new_tmp_a64(s);
10257 int i;
10258
10259 if (size >= 3) {
10260 unallocated_encoding(s);
10261 return;
10262 }
10263
10264 if (!fp_access_check(s)) {
10265 return;
10266 }
10267
10268 /* For the LL variants the store is larger than the load,
10269 * so if rd == rn we would overwrite parts of our input.
10270 * Load the whole input up front and extract each element with shifts below.
10271 */
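/*
 * Note: the option argument passed to ext_and_shift_reg below is
 * size | (!is_u << 2), i.e. a sign-extension (SXTB/SXTH/SXTW) for
 * SSHLL and a zero-extension (UXTB/UXTH/UXTW) for USHLL.
 */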
10272 read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
10273
10274 for (i = 0; i < elements; i++) {
10275 tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
10276 ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
10277 tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
10278 write_vec_element(s, tcg_rd, rd, i, size + 1);
10279 }
10280 }
10281
10282 /* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
10283 static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
10284 int immh, int immb, int opcode, int rn, int rd)
10285 {
10286 int immhb = immh << 3 | immb;
10287 int size = 32 - clz32(immh) - 1;
10288 int dsize = 64;
10289 int esize = 8 << size;
10290 int elements = dsize/esize;
10291 int shift = (2 * esize) - immhb;
10292 bool round = extract32(opcode, 0, 1);
10293 TCGv_i64 tcg_rn, tcg_rd, tcg_final;
10294 TCGv_i64 tcg_round;
10295 int i;
10296
10297 if (extract32(immh, 3, 1)) {
10298 unallocated_encoding(s);
10299 return;
10300 }
10301
10302 if (!fp_access_check(s)) {
10303 return;
10304 }
10305
10306 tcg_rn = tcg_temp_new_i64();
10307 tcg_rd = tcg_temp_new_i64();
10308 tcg_final = tcg_temp_new_i64();
10309 read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
10310
10311 if (round) {
10312 uint64_t round_const = 1ULL << (shift - 1);
10313 tcg_round = tcg_const_i64(round_const);
10314 } else {
10315 tcg_round = NULL;
10316 }
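/*
 * Worked example (illustrative): RSHRN narrowing 32-bit sources to
 * 16-bit results (size == 1) with immhb == 28 gives shift == 4 and
 * round_const == 8, i.e. half the weight of the last bit shifted out.
 */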
10317
10318 for (i = 0; i < elements; i++) {
10319 read_vec_element(s, tcg_rn, rn, i, size+1);
10320 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
10321 false, true, size+1, shift);
10322
10323 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
10324 }
10325
10326 if (!is_q) {
10327 write_vec_element(s, tcg_final, rd, 0, MO_64);
10328 } else {
10329 write_vec_element(s, tcg_final, rd, 1, MO_64);
10330 }
10331 if (round) {
10332 tcg_temp_free_i64(tcg_round);
10333 }
10334 tcg_temp_free_i64(tcg_rn);
10335 tcg_temp_free_i64(tcg_rd);
10336 tcg_temp_free_i64(tcg_final);
10337
10338 clear_vec_high(s, is_q, rd);
10339 }
10340
10341
10342 /* AdvSIMD shift by immediate
10343 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
10344 * +---+---+---+-------------+------+------+--------+---+------+------+
10345 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
10346 * +---+---+---+-------------+------+------+--------+---+------+------+
10347 */
10348 static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
10349 {
10350 int rd = extract32(insn, 0, 5);
10351 int rn = extract32(insn, 5, 5);
10352 int opcode = extract32(insn, 11, 5);
10353 int immb = extract32(insn, 16, 3);
10354 int immh = extract32(insn, 19, 4);
10355 bool is_u = extract32(insn, 29, 1);
10356 bool is_q = extract32(insn, 30, 1);
10357
10358 /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
10359 assert(immh != 0);
10360
10361 switch (opcode) {
10362 case 0x08: /* SRI */
10363 if (!is_u) {
10364 unallocated_encoding(s);
10365 return;
10366 }
10367 /* fall through */
10368 case 0x00: /* SSHR / USHR */
10369 case 0x02: /* SSRA / USRA (accumulate) */
10370 case 0x04: /* SRSHR / URSHR (rounding) */
10371 case 0x06: /* SRSRA / URSRA (accum + rounding) */
10372 handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
10373 break;
10374 case 0x0a: /* SHL / SLI */
10375 handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
10376 break;
10377 case 0x10: /* SHRN */
10378 case 0x11: /* RSHRN / SQRSHRUN */
10379 if (is_u) {
10380 handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
10381 opcode, rn, rd);
10382 } else {
10383 handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
10384 }
10385 break;
10386 case 0x12: /* SQSHRN / UQSHRN */
10387 case 0x13: /* SQRSHRN / UQRSHRN */
10388 handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
10389 opcode, rn, rd);
10390 break;
10391 case 0x14: /* SSHLL / USHLL */
10392 handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
10393 break;
10394 case 0x1c: /* SCVTF / UCVTF */
10395 handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
10396 opcode, rn, rd);
10397 break;
10398 case 0xc: /* SQSHLU */
10399 if (!is_u) {
10400 unallocated_encoding(s);
10401 return;
10402 }
10403 handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
10404 break;
10405 case 0xe: /* SQSHL, UQSHL */
10406 handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
10407 break;
10408 case 0x1f: /* FCVTZS/ FCVTZU */
10409 handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
10410 return;
10411 default:
10412 unallocated_encoding(s);
10413 return;
10414 }
10415 }
10416
10417 /* Generate code to do a "long" addition or subtraction, i.e. one done in
10418 * TCGv_i64 on vector lanes twice the width specified by size.
10419 */
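/*
 * E.g. for size == 0 the helpers treat each i64 as four packed 16-bit
 * lanes (widened bytes) and add or subtract the lanes independently;
 * size == 2 is a plain 64-bit operation.
 */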
10420 static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
10421 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
10422 {
10423 static NeonGenTwo64OpFn * const fns[3][2] = {
10424 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
10425 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
10426 { tcg_gen_add_i64, tcg_gen_sub_i64 },
10427 };
10428 NeonGenTwo64OpFn *genfn;
10429 assert(size < 3);
10430
10431 genfn = fns[size][is_sub];
10432 genfn(tcg_res, tcg_op1, tcg_op2);
10433 }
10434
10435 static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
10436 int opcode, int rd, int rn, int rm)
10437 {
10438 /* 3-reg-different widening insns: 64 x 64 -> 128 */
10439 TCGv_i64 tcg_res[2];
10440 int pass, accop;
10441
10442 tcg_res[0] = tcg_temp_new_i64();
10443 tcg_res[1] = tcg_temp_new_i64();
10444
10445 /* Does this op do an adding accumulate, a subtracting accumulate,
10446 * or no accumulate at all?
10447 */
10448 switch (opcode) {
10449 case 5:
10450 case 8:
10451 case 9:
10452 accop = 1;
10453 break;
10454 case 10:
10455 case 11:
10456 accop = -1;
10457 break;
10458 default:
10459 accop = 0;
10460 break;
10461 }
10462
10463 if (accop != 0) {
10464 read_vec_element(s, tcg_res[0], rd, 0, MO_64);
10465 read_vec_element(s, tcg_res[1], rd, 1, MO_64);
10466 }
10467
10468 /* size == 2 means two 32x32->64 operations; this is worth special
10469 * casing because we can generally handle it inline.
10470 */
10471 if (size == 2) {
10472 for (pass = 0; pass < 2; pass++) {
10473 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10474 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10475 TCGv_i64 tcg_passres;
10476 MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
10477
10478 int elt = pass + is_q * 2;
10479
10480 read_vec_element(s, tcg_op1, rn, elt, memop);
10481 read_vec_element(s, tcg_op2, rm, elt, memop);
10482
10483 if (accop == 0) {
10484 tcg_passres = tcg_res[pass];
10485 } else {
10486 tcg_passres = tcg_temp_new_i64();
10487 }
10488
10489 switch (opcode) {
10490 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10491 tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
10492 break;
10493 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10494 tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
10495 break;
10496 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10497 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10498 {
10499 TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
10500 TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();
10501
10502 tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
10503 tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
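/* Pick whichever difference is non-negative: |op1 - op2|. */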
10504 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
10505 tcg_passres,
10506 tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
10507 tcg_temp_free_i64(tcg_tmp1);
10508 tcg_temp_free_i64(tcg_tmp2);
10509 break;
10510 }
10511 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10512 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10513 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10514 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10515 break;
10516 case 9: /* SQDMLAL, SQDMLAL2 */
10517 case 11: /* SQDMLSL, SQDMLSL2 */
10518 case 13: /* SQDMULL, SQDMULL2 */
10519 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10520 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
10521 tcg_passres, tcg_passres);
10522 break;
10523 default:
10524 g_assert_not_reached();
10525 }
10526
10527 if (opcode == 9 || opcode == 11) {
10528 /* saturating accumulate ops */
10529 if (accop < 0) {
10530 tcg_gen_neg_i64(tcg_passres, tcg_passres);
10531 }
10532 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
10533 tcg_res[pass], tcg_passres);
10534 } else if (accop > 0) {
10535 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10536 } else if (accop < 0) {
10537 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10538 }
10539
10540 if (accop != 0) {
10541 tcg_temp_free_i64(tcg_passres);
10542 }
10543
10544 tcg_temp_free_i64(tcg_op1);
10545 tcg_temp_free_i64(tcg_op2);
10546 }
10547 } else {
10548 /* size 0 or 1, generally helper functions */
10549 for (pass = 0; pass < 2; pass++) {
10550 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10551 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10552 TCGv_i64 tcg_passres;
10553 int elt = pass + is_q * 2;
10554
10555 read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
10556 read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
10557
10558 if (accop == 0) {
10559 tcg_passres = tcg_res[pass];
10560 } else {
10561 tcg_passres = tcg_temp_new_i64();
10562 }
10563
10564 switch (opcode) {
10565 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10566 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10567 {
10568 TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
10569 static NeonGenWidenFn * const widenfns[2][2] = {
10570 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10571 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10572 };
10573 NeonGenWidenFn *widenfn = widenfns[size][is_u];
10574
10575 widenfn(tcg_op2_64, tcg_op2);
10576 widenfn(tcg_passres, tcg_op1);
10577 gen_neon_addl(size, (opcode == 2), tcg_passres,
10578 tcg_passres, tcg_op2_64);
10579 tcg_temp_free_i64(tcg_op2_64);
10580 break;
10581 }
10582 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10583 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10584 if (size == 0) {
10585 if (is_u) {
10586 gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
10587 } else {
10588 gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
10589 }
10590 } else {
10591 if (is_u) {
10592 gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
10593 } else {
10594 gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
10595 }
10596 }
10597 break;
10598 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10599 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10600 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10601 if (size == 0) {
10602 if (is_u) {
10603 gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
10604 } else {
10605 gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
10606 }
10607 } else {
10608 if (is_u) {
10609 gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
10610 } else {
10611 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10612 }
10613 }
10614 break;
10615 case 9: /* SQDMLAL, SQDMLAL2 */
10616 case 11: /* SQDMLSL, SQDMLSL2 */
10617 case 13: /* SQDMULL, SQDMULL2 */
10618 assert(size == 1);
10619 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10620 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
10621 tcg_passres, tcg_passres);
10622 break;
10623 default:
10624 g_assert_not_reached();
10625 }
10626 tcg_temp_free_i32(tcg_op1);
10627 tcg_temp_free_i32(tcg_op2);
10628
10629 if (accop != 0) {
10630 if (opcode == 9 || opcode == 11) {
10631 /* saturating accumulate ops */
10632 if (accop < 0) {
10633 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
10634 }
10635 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
10636 tcg_res[pass],
10637 tcg_passres);
10638 } else {
10639 gen_neon_addl(size, (accop < 0), tcg_res[pass],
10640 tcg_res[pass], tcg_passres);
10641 }
10642 tcg_temp_free_i64(tcg_passres);
10643 }
10644 }
10645 }
10646
10647 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
10648 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
10649 tcg_temp_free_i64(tcg_res[0]);
10650 tcg_temp_free_i64(tcg_res[1]);
10651 }
10652
10653 static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
10654 int opcode, int rd, int rn, int rm)
10655 {
10656 TCGv_i64 tcg_res[2];
10657 int part = is_q ? 2 : 0;
10658 int pass;
10659
10660 for (pass = 0; pass < 2; pass++) {
10661 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10662 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10663 TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
10664 static NeonGenWidenFn * const widenfns[3][2] = {
10665 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10666 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10667 { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
10668 };
10669 NeonGenWidenFn *widenfn = widenfns[size][is_u];
10670
10671 read_vec_element(s, tcg_op1, rn, pass, MO_64);
10672 read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
10673 widenfn(tcg_op2_wide, tcg_op2);
10674 tcg_temp_free_i32(tcg_op2);
10675 tcg_res[pass] = tcg_temp_new_i64();
10676 gen_neon_addl(size, (opcode == 3),
10677 tcg_res[pass], tcg_op1, tcg_op2_wide);
10678 tcg_temp_free_i64(tcg_op1);
10679 tcg_temp_free_i64(tcg_op2_wide);
10680 }
10681
10682 for (pass = 0; pass < 2; pass++) {
10683 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10684 tcg_temp_free_i64(tcg_res[pass]);
10685 }
10686 }
10687
10688 static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
10689 {
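/* Round to nearest: add half the weight of the discarded low 32 bits
 * (1 << 31), then keep only the high half.
 */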
10690 tcg_gen_addi_i64(in, in, 1U << 31);
10691 tcg_gen_extrh_i64_i32(res, in);
10692 }
10693
10694 static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
10695 int opcode, int rd, int rn, int rm)
10696 {
10697 TCGv_i32 tcg_res[2];
10698 int part = is_q ? 2 : 0;
10699 int pass;
10700
10701 for (pass = 0; pass < 2; pass++) {
10702 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10703 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10704 TCGv_i64 tcg_wideres = tcg_temp_new_i64();
10705 static NeonGenNarrowFn * const narrowfns[3][2] = {
10706 { gen_helper_neon_narrow_high_u8,
10707 gen_helper_neon_narrow_round_high_u8 },
10708 { gen_helper_neon_narrow_high_u16,
10709 gen_helper_neon_narrow_round_high_u16 },
10710 { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
10711 };
10712 NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
10713
10714 read_vec_element(s, tcg_op1, rn, pass, MO_64);
10715 read_vec_element(s, tcg_op2, rm, pass, MO_64);
10716
10717 gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
10718
10719 tcg_temp_free_i64(tcg_op1);
10720 tcg_temp_free_i64(tcg_op2);
10721
10722 tcg_res[pass] = tcg_temp_new_i32();
10723 gennarrow(tcg_res[pass], tcg_wideres);
10724 tcg_temp_free_i64(tcg_wideres);
10725 }
10726
10727 for (pass = 0; pass < 2; pass++) {
10728 write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
10729 tcg_temp_free_i32(tcg_res[pass]);
10730 }
10731 clear_vec_high(s, is_q, rd);
10732 }
10733
10734 /* AdvSIMD three different
10735 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
10736 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10737 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
10738 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10739 */
10740 static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
10741 {
10742 /* Instructions in this group fall into three basic classes
10743 * (in each case with the operation working on each element in
10744 * the input vectors):
10745 * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
10746 * 128 bit input)
10747 * (2) wide 64 x 128 -> 128
10748 * (3) narrowing 128 x 128 -> 64
10749 * Here we do initial decode, catch unallocated cases and
10750 * dispatch to separate functions for each class.
10751 */
10752 int is_q = extract32(insn, 30, 1);
10753 int is_u = extract32(insn, 29, 1);
10754 int size = extract32(insn, 22, 2);
10755 int opcode = extract32(insn, 12, 4);
10756 int rm = extract32(insn, 16, 5);
10757 int rn = extract32(insn, 5, 5);
10758 int rd = extract32(insn, 0, 5);
10759
10760 switch (opcode) {
10761 case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
10762 case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
10763 /* 64 x 128 -> 128 */
10764 if (size == 3) {
10765 unallocated_encoding(s);
10766 return;
10767 }
10768 if (!fp_access_check(s)) {
10769 return;
10770 }
10771 handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
10772 break;
10773 case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
10774 case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
10775 /* 128 x 128 -> 64 */
10776 if (size == 3) {
10777 unallocated_encoding(s);
10778 return;
10779 }
10780 if (!fp_access_check(s)) {
10781 return;
10782 }
10783 handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
10784 break;
10785 case 14: /* PMULL, PMULL2 */
10786 if (is_u) {
10787 unallocated_encoding(s);
10788 return;
10789 }
10790 switch (size) {
10791 case 0: /* PMULL.P8 */
10792 if (!fp_access_check(s)) {
10793 return;
10794 }
10795 /* The Q field specifies lo/hi half input for this insn. */
10796 gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
10797 gen_helper_neon_pmull_h);
10798 break;
10799
10800 case 3: /* PMULL.P64 */
10801 if (!dc_isar_feature(aa64_pmull, s)) {
10802 unallocated_encoding(s);
10803 return;
10804 }
10805 if (!fp_access_check(s)) {
10806 return;
10807 }
10808 /* The Q field specifies lo/hi half input for this insn. */
10809 gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
10810 gen_helper_gvec_pmull_q);
10811 break;
10812
10813 default:
10814 unallocated_encoding(s);
10815 break;
10816 }
10817 return;
10818 case 9: /* SQDMLAL, SQDMLAL2 */
10819 case 11: /* SQDMLSL, SQDMLSL2 */
10820 case 13: /* SQDMULL, SQDMULL2 */
10821 if (is_u || size == 0) {
10822 unallocated_encoding(s);
10823 return;
10824 }
10825 /* fall through */
10826 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10827 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10828 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10829 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10830 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10831 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10832 case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
10833 /* 64 x 64 -> 128 */
10834 if (size == 3) {
10835 unallocated_encoding(s);
10836 return;
10837 }
10838 if (!fp_access_check(s)) {
10839 return;
10840 }
10841
10842 handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
10843 break;
10844 default:
10845 /* opcode 15 not allocated */
10846 unallocated_encoding(s);
10847 break;
10848 }
10849 }
10850
10851 /* Logic op (opcode == 3) subgroup of C3.6.16. */
10852 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
10853 {
10854 int rd = extract32(insn, 0, 5);
10855 int rn = extract32(insn, 5, 5);
10856 int rm = extract32(insn, 16, 5);
10857 int size = extract32(insn, 22, 2);
10858 bool is_u = extract32(insn, 29, 1);
10859 bool is_q = extract32(insn, 30, 1);
10860
10861 if (!fp_access_check(s)) {
10862 return;
10863 }
10864
10865 switch (size + 4 * is_u) {
10866 case 0: /* AND */
10867 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
10868 return;
10869 case 1: /* BIC */
10870 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
10871 return;
10872 case 2: /* ORR */
10873 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
10874 return;
10875 case 3: /* ORN */
10876 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
10877 return;
10878 case 4: /* EOR */
10879 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
10880 return;
10881
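/*
 * BSL, BIT and BIF all reduce to bitsel(mask, t, f) = (t & mask) |
 * (f & ~mask); they differ only in which register supplies the mask
 * (Rd for BSL, Rm for BIT and BIF) and in the operand ordering.
 */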
10882 case 5: /* BSL bitwise select */
10883 gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
10884 return;
10885 case 6: /* BIT, bitwise insert if true */
10886 gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
10887 return;
10888 case 7: /* BIF, bitwise insert if false */
10889 gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
10890 return;
10891
10892 default:
10893 g_assert_not_reached();
10894 }
10895 }
10896
10897 /* Pairwise op subgroup of C3.6.16.
10898 *
10899 * This is called directly, or via disas_simd_3same_float for the float
10900 * pairwise operations, where the opcode and size are calculated differently.
10901 */
10902 static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
10903 int size, int rn, int rm, int rd)
10904 {
10905 TCGv_ptr fpst;
10906 int pass;
10907
10908 if (!fp_access_check(s)) {
10909 return;
10910 }
10911
10912 /* FP operations need fpst; allocate it only after the access check. */
10913 if (opcode >= 0x58) {
10914 fpst = get_fpstatus_ptr(false);
10915 } else {
10916 fpst = NULL;
10917 }
10918
10919 /* These operations work on the concatenated rm:rn, with each pair of
10920 * adjacent elements being operated on to produce an element in the result.
10921 */
10922 if (size == 3) {
10923 TCGv_i64 tcg_res[2];
10924
10925 for (pass = 0; pass < 2; pass++) {
10926 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10927 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10928 int passreg = (pass == 0) ? rn : rm;
10929
10930 read_vec_element(s, tcg_op1, passreg, 0, MO_64);
10931 read_vec_element(s, tcg_op2, passreg, 1, MO_64);
10932 tcg_res[pass] = tcg_temp_new_i64();
10933
10934 switch (opcode) {
10935 case 0x17: /* ADDP */
10936 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
10937 break;
10938 case 0x58: /* FMAXNMP */
10939 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10940 break;
10941 case 0x5a: /* FADDP */
10942 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10943 break;
10944 case 0x5e: /* FMAXP */
10945 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10946 break;
10947 case 0x78: /* FMINNMP */
10948 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10949 break;
10950 case 0x7e: /* FMINP */
10951 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10952 break;
10953 default:
10954 g_assert_not_reached();
10955 }
10956
10957 tcg_temp_free_i64(tcg_op1);
10958 tcg_temp_free_i64(tcg_op2);
10959 }
10960
10961 for (pass = 0; pass < 2; pass++) {
10962 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10963 tcg_temp_free_i64(tcg_res[pass]);
10964 }
10965 } else {
10966 int maxpass = is_q ? 4 : 2;
10967 TCGv_i32 tcg_res[4];
10968
10969 for (pass = 0; pass < maxpass; pass++) {
10970 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10971 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10972 NeonGenTwoOpFn *genfn = NULL;
10973 int passreg = pass < (maxpass / 2) ? rn : rm;
10974 int passelt = (is_q && (pass & 1)) ? 2 : 0;
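/*
 * E.g. a 128-bit ADDP runs four passes reading element pairs (0,1)
 * and (2,3) of Rn, then (0,1) and (2,3) of Rm.
 */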
10975
10976 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
10977 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
10978 tcg_res[pass] = tcg_temp_new_i32();
10979
10980 switch (opcode) {
10981 case 0x17: /* ADDP */
10982 {
10983 static NeonGenTwoOpFn * const fns[3] = {
10984 gen_helper_neon_padd_u8,
10985 gen_helper_neon_padd_u16,
10986 tcg_gen_add_i32,
10987 };
10988 genfn = fns[size];
10989 break;
10990 }
10991 case 0x14: /* SMAXP, UMAXP */
10992 {
10993 static NeonGenTwoOpFn * const fns[3][2] = {
10994 { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
10995 { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
10996 { tcg_gen_smax_i32, tcg_gen_umax_i32 },
10997 };
10998 genfn = fns[size][u];
10999 break;
11000 }
11001 case 0x15: /* SMINP, UMINP */
11002 {
11003 static NeonGenTwoOpFn * const fns[3][2] = {
11004 { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
11005 { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
11006 { tcg_gen_smin_i32, tcg_gen_umin_i32 },
11007 };
11008 genfn = fns[size][u];
11009 break;
11010 }
11011 /* The FP operations are all on single floats (32 bit) */
11012 case 0x58: /* FMAXNMP */
11013 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11014 break;
11015 case 0x5a: /* FADDP */
11016 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11017 break;
11018 case 0x5e: /* FMAXP */
11019 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11020 break;
11021 case 0x78: /* FMINNMP */
11022 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11023 break;
11024 case 0x7e: /* FMINP */
11025 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11026 break;
11027 default:
11028 g_assert_not_reached();
11029 }
11030
11031 /* FP ops above were generated directly; integer ops call their helper now */
11032 if (genfn) {
11033 genfn(tcg_res[pass], tcg_op1, tcg_op2);
11034 }
11035
11036 tcg_temp_free_i32(tcg_op1);
11037 tcg_temp_free_i32(tcg_op2);
11038 }
11039
11040 for (pass = 0; pass < maxpass; pass++) {
11041 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11042 tcg_temp_free_i32(tcg_res[pass]);
11043 }
11044 clear_vec_high(s, is_q, rd);
11045 }
11046
11047 if (fpst) {
11048 tcg_temp_free_ptr(fpst);
11049 }
11050 }
11051
11052 /* Floating point op subgroup of C3.6.16. */
11053 static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
11054 {
11055 /* For floating point ops, the U, size[1] and opcode bits
11056 * together indicate the operation. size[0] indicates single
11057 * or double.
11058 */
11059 int fpopcode = extract32(insn, 11, 5)
11060 | (extract32(insn, 23, 1) << 5)
11061 | (extract32(insn, 29, 1) << 6);
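/*
 * Example (illustrative): FADDP encodes U == 1, size<1> == 0 and
 * opcode == 0x1a, giving fpopcode == 0x5a below.
 */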
11062 int is_q = extract32(insn, 30, 1);
11063 int size = extract32(insn, 22, 1);
11064 int rm = extract32(insn, 16, 5);
11065 int rn = extract32(insn, 5, 5);
11066 int rd = extract32(insn, 0, 5);
11067
11068 int datasize = is_q ? 128 : 64;
11069 int esize = 32 << size;
11070 int elements = datasize / esize;
11071
11072 if (size == 1 && !is_q) {
11073 unallocated_encoding(s);
11074 return;
11075 }
11076
11077 switch (fpopcode) {
11078 case 0x58: /* FMAXNMP */
11079 case 0x5a: /* FADDP */
11080 case 0x5e: /* FMAXP */
11081 case 0x78: /* FMINNMP */
11082 case 0x7e: /* FMINP */
11083 if (size && !is_q) {
11084 unallocated_encoding(s);
11085 return;
11086 }
11087 handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
11088 rn, rm, rd);
11089 return;
11090 case 0x1b: /* FMULX */
11091 case 0x1f: /* FRECPS */
11092 case 0x3f: /* FRSQRTS */
11093 case 0x5d: /* FACGE */
11094 case 0x7d: /* FACGT */
11095 case 0x19: /* FMLA */
11096 case 0x39: /* FMLS */
11097 case 0x18: /* FMAXNM */
11098 case 0x1a: /* FADD */
11099 case 0x1c: /* FCMEQ */
11100 case 0x1e: /* FMAX */
11101 case 0x38: /* FMINNM */
11102 case 0x3a: /* FSUB */
11103 case 0x3e: /* FMIN */
11104 case 0x5b: /* FMUL */
11105 case 0x5c: /* FCMGE */
11106 case 0x5f: /* FDIV */
11107 case 0x7a: /* FABD */
11108 case 0x7c: /* FCMGT */
11109 if (!fp_access_check(s)) {
11110 return;
11111 }
11112 handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
11113 return;
11114
11115 case 0x1d: /* FMLAL */
11116 case 0x3d: /* FMLSL */
11117 case 0x59: /* FMLAL2 */
11118 case 0x79: /* FMLSL2 */
11119 if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
11120 unallocated_encoding(s);
11121 return;
11122 }
11123 if (fp_access_check(s)) {
11124 int is_s = extract32(insn, 23, 1);
11125 int is_2 = extract32(insn, 29, 1);
11126 int data = (is_2 << 1) | is_s;
11127 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
11128 vec_full_reg_offset(s, rn),
11129 vec_full_reg_offset(s, rm), cpu_env,
11130 is_q ? 16 : 8, vec_full_reg_size(s),
11131 data, gen_helper_gvec_fmlal_a64);
11132 }
11133 return;
11134
11135 default:
11136 unallocated_encoding(s);
11137 return;
11138 }
11139 }
11140
11141 /* Integer op subgroup of C3.6.16. */
11142 static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
11143 {
11144 int is_q = extract32(insn, 30, 1);
11145 int u = extract32(insn, 29, 1);
11146 int size = extract32(insn, 22, 2);
11147 int opcode = extract32(insn, 11, 5);
11148 int rm = extract32(insn, 16, 5);
11149 int rn = extract32(insn, 5, 5);
11150 int rd = extract32(insn, 0, 5);
11151 int pass;
11152 TCGCond cond;
11153
11154 switch (opcode) {
11155 case 0x13: /* MUL, PMUL */
11156 if (u && size != 0) {
11157 unallocated_encoding(s);
11158 return;
11159 }
11160 /* fall through */
11161 case 0x0: /* SHADD, UHADD */
11162 case 0x2: /* SRHADD, URHADD */
11163 case 0x4: /* SHSUB, UHSUB */
11164 case 0xc: /* SMAX, UMAX */
11165 case 0xd: /* SMIN, UMIN */
11166 case 0xe: /* SABD, UABD */
11167 case 0xf: /* SABA, UABA */
11168 case 0x12: /* MLA, MLS */
11169 if (size == 3) {
11170 unallocated_encoding(s);
11171 return;
11172 }
11173 break;
11174 case 0x16: /* SQDMULH, SQRDMULH */
11175 if (size == 0 || size == 3) {
11176 unallocated_encoding(s);
11177 return;
11178 }
11179 break;
11180 default:
11181 if (size == 3 && !is_q) {
11182 unallocated_encoding(s);
11183 return;
11184 }
11185 break;
11186 }
11187
11188 if (!fp_access_check(s)) {
11189 return;
11190 }
11191
11192 switch (opcode) {
11193 case 0x01: /* SQADD, UQADD */
11194 if (u) {
11195 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
11196 } else {
11197 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
11198 }
11199 return;
11200 case 0x05: /* SQSUB, UQSUB */
11201 if (u) {
11202 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
11203 } else {
11204 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
11205 }
11206 return;
11207 case 0x08: /* SSHL, USHL */
11208 if (u) {
11209 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
11210 } else {
11211 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
11212 }
11213 return;
11214 case 0x0c: /* SMAX, UMAX */
11215 if (u) {
11216 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
11217 } else {
11218 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
11219 }
11220 return;
11221 case 0x0d: /* SMIN, UMIN */
11222 if (u) {
11223 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
11224 } else {
11225 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
11226 }
11227 return;
11228 case 0xe: /* SABD, UABD */
11229 if (u) {
11230 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size);
11231 } else {
11232 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size);
11233 }
11234 return;
11235 case 0xf: /* SABA, UABA */
11236 if (u) {
11237 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size);
11238 } else {
11239 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size);
11240 }
11241 return;
11242 case 0x10: /* ADD, SUB */
11243 if (u) {
11244 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
11245 } else {
11246 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
11247 }
11248 return;
11249 case 0x13: /* MUL, PMUL */
11250 if (!u) { /* MUL */
11251 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
11252 } else { /* PMUL */
11253 gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
11254 }
11255 return;
11256 case 0x12: /* MLA, MLS */
11257 if (u) {
11258 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size);
11259 } else {
11260 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size);
11261 }
11262 return;
11263 case 0x11: /* CMTST, CMEQ */
11264 if (!u) { /* CMTST */
11265 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
11266 return;
11267 }
11268 /* else CMEQ */
11269 cond = TCG_COND_EQ;
11270 goto do_gvec_cmp;
11271 case 0x06: /* CMGT, CMHI */
11272 cond = u ? TCG_COND_GTU : TCG_COND_GT;
11273 goto do_gvec_cmp;
11274 case 0x07: /* CMGE, CMHS */
11275 cond = u ? TCG_COND_GEU : TCG_COND_GE;
11276 do_gvec_cmp:
11277 tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
11278 vec_full_reg_offset(s, rn),
11279 vec_full_reg_offset(s, rm),
11280 is_q ? 16 : 8, vec_full_reg_size(s));
11281 return;
11282 }
11283
11284 if (size == 3) {
11285 assert(is_q);
11286 for (pass = 0; pass < 2; pass++) {
11287 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11288 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11289 TCGv_i64 tcg_res = tcg_temp_new_i64();
11290
11291 read_vec_element(s, tcg_op1, rn, pass, MO_64);
11292 read_vec_element(s, tcg_op2, rm, pass, MO_64);
11293
11294 handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
11295
11296 write_vec_element(s, tcg_res, rd, pass, MO_64);
11297
11298 tcg_temp_free_i64(tcg_res);
11299 tcg_temp_free_i64(tcg_op1);
11300 tcg_temp_free_i64(tcg_op2);
11301 }
11302 } else {
11303 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
11304 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11305 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11306 TCGv_i32 tcg_res = tcg_temp_new_i32();
11307 NeonGenTwoOpFn *genfn = NULL;
11308 NeonGenTwoOpEnvFn *genenvfn = NULL;
11309
11310 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
11311 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
11312
11313 switch (opcode) {
11314 case 0x0: /* SHADD, UHADD */
11315 {
11316 static NeonGenTwoOpFn * const fns[3][2] = {
11317 { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
11318 { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
11319 { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
11320 };
11321 genfn = fns[size][u];
11322 break;
11323 }
11324 case 0x2: /* SRHADD, URHADD */
11325 {
11326 static NeonGenTwoOpFn * const fns[3][2] = {
11327 { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
11328 { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
11329 { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
11330 };
11331 genfn = fns[size][u];
11332 break;
11333 }
11334 case 0x4: /* SHSUB, UHSUB */
11335 {
11336 static NeonGenTwoOpFn * const fns[3][2] = {
11337 { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
11338 { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
11339 { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
11340 };
11341 genfn = fns[size][u];
11342 break;
11343 }
11344 case 0x9: /* SQSHL, UQSHL */
11345 {
11346 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11347 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
11348 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
11349 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
11350 };
11351 genenvfn = fns[size][u];
11352 break;
11353 }
11354 case 0xa: /* SRSHL, URSHL */
11355 {
11356 static NeonGenTwoOpFn * const fns[3][2] = {
11357 { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
11358 { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
11359 { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
11360 };
11361 genfn = fns[size][u];
11362 break;
11363 }
11364 case 0xb: /* SQRSHL, UQRSHL */
11365 {
11366 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11367 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
11368 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
11369 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
11370 };
11371 genenvfn = fns[size][u];
11372 break;
11373 }
11374 case 0x16: /* SQDMULH, SQRDMULH */
11375 {
11376 static NeonGenTwoOpEnvFn * const fns[2][2] = {
11377 { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
11378 { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
11379 };
11380 assert(size == 1 || size == 2);
11381 genenvfn = fns[size - 1][u];
11382 break;
11383 }
11384 default:
11385 g_assert_not_reached();
11386 }
11387
11388 if (genenvfn) {
11389 genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
11390 } else {
11391 genfn(tcg_res, tcg_op1, tcg_op2);
11392 }
11393
11394 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
11395
11396 tcg_temp_free_i32(tcg_res);
11397 tcg_temp_free_i32(tcg_op1);
11398 tcg_temp_free_i32(tcg_op2);
11399 }
11400 }
11401 clear_vec_high(s, is_q, rd);
11402 }
11403
11404 /* AdvSIMD three same
11405 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
11406 * +---+---+---+-----------+------+---+------+--------+---+------+------+
11407 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
11408 * +---+---+---+-----------+------+---+------+--------+---+------+------+
11409 */
11410 static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
11411 {
11412 int opcode = extract32(insn, 11, 5);
11413
11414 switch (opcode) {
11415 case 0x3: /* logic ops */
11416 disas_simd_3same_logic(s, insn);
11417 break;
11418 case 0x17: /* ADDP */
11419 case 0x14: /* SMAXP, UMAXP */
11420 case 0x15: /* SMINP, UMINP */
11421 {
11422 /* Pairwise operations */
11423 int is_q = extract32(insn, 30, 1);
11424 int u = extract32(insn, 29, 1);
11425 int size = extract32(insn, 22, 2);
11426 int rm = extract32(insn, 16, 5);
11427 int rn = extract32(insn, 5, 5);
11428 int rd = extract32(insn, 0, 5);
11429 if (opcode == 0x17) {
11430 if (u || (size == 3 && !is_q)) {
11431 unallocated_encoding(s);
11432 return;
11433 }
11434 } else {
11435 if (size == 3) {
11436 unallocated_encoding(s);
11437 return;
11438 }
11439 }
11440 handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
11441 break;
11442 }
11443 case 0x18 ... 0x31:
11444 /* floating point ops, sz[1] and U are part of opcode */
11445 disas_simd_3same_float(s, insn);
11446 break;
11447 default:
11448 disas_simd_3same_int(s, insn);
11449 break;
11450 }
11451 }
11452
11453 /*
11454 * Advanced SIMD three same (ARMv8.2 FP16 variants)
11455 *
11456 * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0
11457 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11458 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd |
11459 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11460 *
11461 * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
11462 * (register), FACGE, FABD, FCMGT (register) and FACGT.
11463 *
11464 */
11465 static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
11466 {
11467 int opcode, fpopcode;
11468 int is_q, u, a, rm, rn, rd;
11469 int datasize, elements;
11470 int pass;
11471 TCGv_ptr fpst;
11472 bool pairwise = false;
11473
11474 if (!dc_isar_feature(aa64_fp16, s)) {
11475 unallocated_encoding(s);
11476 return;
11477 }
11478
11479 if (!fp_access_check(s)) {
11480 return;
11481 }
11482
11483 /* For these floating point ops, the U, a and opcode bits
11484 * together indicate the operation.
11485 */
11486 opcode = extract32(insn, 11, 3);
11487 u = extract32(insn, 29, 1);
11488 a = extract32(insn, 23, 1);
11489 is_q = extract32(insn, 30, 1);
11490 rm = extract32(insn, 16, 5);
11491 rn = extract32(insn, 5, 5);
11492 rd = extract32(insn, 0, 5);
11493
11494 fpopcode = opcode | (a << 3) | (u << 4);
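/*
 * Example (illustrative): the half-precision FADDP has U == 1, a == 0
 * and opcode == 0x2, giving fpopcode == 0x12.
 */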
11495 datasize = is_q ? 128 : 64;
11496 elements = datasize / 16;
11497
11498 switch (fpopcode) {
11499 case 0x10: /* FMAXNMP */
11500 case 0x12: /* FADDP */
11501 case 0x16: /* FMAXP */
11502 case 0x18: /* FMINNMP */
11503 case 0x1e: /* FMINP */
11504 pairwise = true;
11505 break;
11506 }
11507
11508 fpst = get_fpstatus_ptr(true);
11509
11510 if (pairwise) {
11511 int maxpass = is_q ? 8 : 4;
11512 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11513 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11514 TCGv_i32 tcg_res[8];
11515
11516 for (pass = 0; pass < maxpass; pass++) {
11517 int passreg = pass < (maxpass / 2) ? rn : rm;
11518 int passelt = (pass << 1) & (maxpass - 1);
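/*
 * passelt steps 0, 2, ... within each source register: with Q == 1
 * the eight passes read halfword pairs (0,1)..(6,7), the first four
 * from Rn and the remainder from Rm.
 */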
11519
11520 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
11521 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
11522 tcg_res[pass] = tcg_temp_new_i32();
11523
11524 switch (fpopcode) {
11525 case 0x10: /* FMAXNMP */
11526 gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
11527 fpst);
11528 break;
11529 case 0x12: /* FADDP */
11530 gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11531 break;
11532 case 0x16: /* FMAXP */
11533 gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11534 break;
11535 case 0x18: /* FMINNMP */
11536 gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
11537 fpst);
11538 break;
11539 case 0x1e: /* FMINP */
11540 gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11541 break;
11542 default:
11543 g_assert_not_reached();
11544 }
11545 }
11546
11547 for (pass = 0; pass < maxpass; pass++) {
11548 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
11549 tcg_temp_free_i32(tcg_res[pass]);
11550 }
11551
11552 tcg_temp_free_i32(tcg_op1);
11553 tcg_temp_free_i32(tcg_op2);
11554
11555 } else {
11556 for (pass = 0; pass < elements; pass++) {
11557 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11558 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11559 TCGv_i32 tcg_res = tcg_temp_new_i32();
11560
11561 read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
11562 read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);
11563
11564 switch (fpopcode) {
11565 case 0x0: /* FMAXNM */
11566 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
11567 break;
11568 case 0x1: /* FMLA */
11569 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11570 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11571 fpst);
11572 break;
11573 case 0x2: /* FADD */
11574 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
11575 break;
11576 case 0x3: /* FMULX */
11577 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
11578 break;
11579 case 0x4: /* FCMEQ */
11580 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11581 break;
11582 case 0x6: /* FMAX */
11583 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
11584 break;
11585 case 0x7: /* FRECPS */
11586 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11587 break;
11588 case 0x8: /* FMINNM */
11589 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
11590 break;
11591 case 0x9: /* FMLS */
11592 /* As usual for ARM, separate negation for fused multiply-add */
11593 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
11594 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11595 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11596 fpst);
11597 break;
11598 case 0xa: /* FSUB */
11599 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11600 break;
11601 case 0xe: /* FMIN */
11602 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
11603 break;
11604 case 0xf: /* FRSQRTS */
11605 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11606 break;
11607 case 0x13: /* FMUL */
11608 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
11609 break;
11610 case 0x14: /* FCMGE */
11611 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11612 break;
11613 case 0x15: /* FACGE */
11614 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11615 break;
11616 case 0x17: /* FDIV */
11617 gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
11618 break;
11619 case 0x1a: /* FABD */
11620 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11621 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
11622 break;
11623 case 0x1c: /* FCMGT */
11624 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11625 break;
11626 case 0x1d: /* FACGT */
11627 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11628 break;
11629 default:
11630 fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
11631 __func__, insn, fpopcode, s->pc_curr);
11632 g_assert_not_reached();
11633 }
11634
11635 write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11636 tcg_temp_free_i32(tcg_res);
11637 tcg_temp_free_i32(tcg_op1);
11638 tcg_temp_free_i32(tcg_op2);
11639 }
11640 }
11641
11642 tcg_temp_free_ptr(fpst);
11643
11644 clear_vec_high(s, is_q, rd);
11645 }
11646
11647 /* AdvSIMD three same extra
11648 * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
11649 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11650 * | 0 | Q | U | 0 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd |
11651 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11652 */
11653 static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
11654 {
11655 int rd = extract32(insn, 0, 5);
11656 int rn = extract32(insn, 5, 5);
11657 int opcode = extract32(insn, 11, 4);
11658 int rm = extract32(insn, 16, 5);
11659 int size = extract32(insn, 22, 2);
11660 bool u = extract32(insn, 29, 1);
11661 bool is_q = extract32(insn, 30, 1);
11662 bool feature;
11663 int rot;
11664
11665 switch (u * 16 + opcode) {
11666 case 0x10: /* SQRDMLAH (vector) */
11667 case 0x11: /* SQRDMLSH (vector) */
11668 if (size != 1 && size != 2) {
11669 unallocated_encoding(s);
11670 return;
11671 }
11672 feature = dc_isar_feature(aa64_rdm, s);
11673 break;
11674 case 0x02: /* SDOT (vector) */
11675 case 0x12: /* UDOT (vector) */
11676 if (size != MO_32) {
11677 unallocated_encoding(s);
11678 return;
11679 }
11680 feature = dc_isar_feature(aa64_dp, s);
11681 break;
11682 case 0x18: /* FCMLA, #0 */
11683 case 0x19: /* FCMLA, #90 */
11684 case 0x1a: /* FCMLA, #180 */
11685 case 0x1b: /* FCMLA, #270 */
11686 case 0x1c: /* FCADD, #90 */
11687 case 0x1e: /* FCADD, #270 */
11688 if (size == 0
11689 || (size == 1 && !dc_isar_feature(aa64_fp16, s))
11690 || (size == 3 && !is_q)) {
11691 unallocated_encoding(s);
11692 return;
11693 }
11694 feature = dc_isar_feature(aa64_fcma, s);
11695 break;
11696 default:
11697 unallocated_encoding(s);
11698 return;
11699 }
11700 if (!feature) {
11701 unallocated_encoding(s);
11702 return;
11703 }
11704 if (!fp_access_check(s)) {
11705 return;
11706 }
11707
11708 switch (opcode) {
11709 case 0x0: /* SQRDMLAH (vector) */
11710 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
11711 return;
11712
11713 case 0x1: /* SQRDMLSH (vector) */
11714 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
11715 return;
11716
11717 case 0x2: /* SDOT / UDOT */
11718 gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0,
11719 u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
11720 return;
11721
11722 case 0x8: /* FCMLA, #0 */
11723 case 0x9: /* FCMLA, #90 */
11724 case 0xa: /* FCMLA, #180 */
11725 case 0xb: /* FCMLA, #270 */
11726 rot = extract32(opcode, 0, 2);
11727 switch (size) {
11728 case 1:
11729 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot,
11730 gen_helper_gvec_fcmlah);
11731 break;
11732 case 2:
11733 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
11734 gen_helper_gvec_fcmlas);
11735 break;
11736 case 3:
11737 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
11738 gen_helper_gvec_fcmlad);
11739 break;
11740 default:
11741 g_assert_not_reached();
11742 }
11743 return;
11744
11745 case 0xc: /* FCADD, #90 */
11746 case 0xe: /* FCADD, #270 */
11747 rot = extract32(opcode, 1, 1);
11748 switch (size) {
11749 case 1:
11750 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11751 gen_helper_gvec_fcaddh);
11752 break;
11753 case 2:
11754 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11755 gen_helper_gvec_fcadds);
11756 break;
11757 case 3:
11758 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11759 gen_helper_gvec_fcaddd);
11760 break;
11761 default:
11762 g_assert_not_reached();
11763 }
11764 return;
11765
11766 default:
11767 g_assert_not_reached();
11768 }
11769 }
11770
11771 static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
11772 int size, int rn, int rd)
11773 {
11774 /* Handle 2-reg-misc ops which are widening (so each size element
11775 * in the source becomes a 2*size element in the destination.
11776 * The only instruction like this is FCVTL.
11777 */
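/* is_q distinguishes FCVTL2, which converts the high half of the
 * source register; srcelt below is the first source element index.
 */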
11778 int pass;
11779
11780 if (size == 3) {
11781 /* 32 -> 64 bit fp conversion */
11782 TCGv_i64 tcg_res[2];
11783 int srcelt = is_q ? 2 : 0;
11784
11785 for (pass = 0; pass < 2; pass++) {
11786 TCGv_i32 tcg_op = tcg_temp_new_i32();
11787 tcg_res[pass] = tcg_temp_new_i64();
11788
11789 read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
11790 gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
11791 tcg_temp_free_i32(tcg_op);
11792 }
11793 for (pass = 0; pass < 2; pass++) {
11794 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11795 tcg_temp_free_i64(tcg_res[pass]);
11796 }
11797 } else {
11798 /* 16 -> 32 bit fp conversion */
11799 int srcelt = is_q ? 4 : 0;
11800 TCGv_i32 tcg_res[4];
11801 TCGv_ptr fpst = get_fpstatus_ptr(false);
11802 TCGv_i32 ahp = get_ahp_flag();
11803
11804 for (pass = 0; pass < 4; pass++) {
11805 tcg_res[pass] = tcg_temp_new_i32();
11806
11807 read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
11808 gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
11809 fpst, ahp);
11810 }
11811 for (pass = 0; pass < 4; pass++) {
11812 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11813 tcg_temp_free_i32(tcg_res[pass]);
11814 }
11815
11816 tcg_temp_free_ptr(fpst);
11817 tcg_temp_free_i32(ahp);
11818 }
11819 }
11820
11821 static void handle_rev(DisasContext *s, int opcode, bool u,
11822 bool is_q, int size, int rn, int rd)
11823 {
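/* op identifies the reversal container: 0 = REV64, 1 = REV32,
 * 2 = REV16, i.e. the container is 64 >> op bits wide. grp_size
 * = 3 - op - size is then log2 of the number of elements per
 * container; opsz >= 3 would make the container no wider than
 * the element itself, which is an unallocated encoding.
 */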
11824 int op = (opcode << 1) | u;
11825 int opsz = op + size;
11826 int grp_size = 3 - opsz;
11827 int dsize = is_q ? 128 : 64;
11828 int i;
11829
11830 if (opsz >= 3) {
11831 unallocated_encoding(s);
11832 return;
11833 }
11834
11835 if (!fp_access_check(s)) {
11836 return;
11837 }
11838
11839 if (size == 0) {
11840 /* Special case bytes, use bswap op on each group of elements */
11841 int groups = dsize / (8 << grp_size);
11842
11843 for (i = 0; i < groups; i++) {
11844 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
11845
11846 read_vec_element(s, tcg_tmp, rn, i, grp_size);
11847 switch (grp_size) {
11848 case MO_16:
11849 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
11850 break;
11851 case MO_32:
11852 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
11853 break;
11854 case MO_64:
11855 tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
11856 break;
11857 default:
11858 g_assert_not_reached();
11859 }
11860 write_vec_element(s, tcg_tmp, rd, i, grp_size);
11861 tcg_temp_free_i64(tcg_tmp);
11862 }
11863 clear_vec_high(s, is_q, rd);
11864 } else {
11865 int revmask = (1 << grp_size) - 1;
11866 int esize = 8 << size;
11867 int elements = dsize / esize;
11868 TCGv_i64 tcg_rn = tcg_temp_new_i64();
11869 TCGv_i64 tcg_rd = tcg_const_i64(0);
11870 TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
11871
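/* Within each container, element i swaps places with element
 * i ^ revmask; the 128-bit result is assembled in two 64-bit
 * halves via deposit.
 */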
11872 for (i = 0; i < elements; i++) {
11873 int e_rev = (i & 0xf) ^ revmask;
11874 int off = e_rev * esize;
11875 read_vec_element(s, tcg_rn, rn, i, size);
11876 if (off >= 64) {
11877 tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
11878 tcg_rn, off - 64, esize);
11879 } else {
11880 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
11881 }
11882 }
11883 write_vec_element(s, tcg_rd, rd, 0, MO_64);
11884 write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
11885
11886 tcg_temp_free_i64(tcg_rd_hi);
11887 tcg_temp_free_i64(tcg_rd);
11888 tcg_temp_free_i64(tcg_rn);
11889 }
11890 }
11891
11892 static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
11893 bool is_q, int size, int rn, int rd)
11894 {
11895 /* Implement the pairwise operations from 2-misc:
11896 * SADDLP, UADDLP, SADALP, UADALP.
11897 * These all add pairs of elements in the input to produce a
11898 * double-width result element in the output (possibly accumulating).
11899 */
11900 bool accum = (opcode == 0x6);
11901 int maxpass = is_q ? 2 : 1;
11902 int pass;
11903 TCGv_i64 tcg_res[2];
11904
11905 if (size == 2) {
11906 /* 32 + 32 -> 64 op */
11907 MemOp memop = size + (u ? 0 : MO_SIGN);
11908
11909 for (pass = 0; pass < maxpass; pass++) {
11910 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11911 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11912
11913 tcg_res[pass] = tcg_temp_new_i64();
11914
11915 read_vec_element(s, tcg_op1, rn, pass * 2, memop);
11916 read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
11917 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
11918 if (accum) {
11919 read_vec_element(s, tcg_op1, rd, pass, MO_64);
11920 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
11921 }
11922
11923 tcg_temp_free_i64(tcg_op1);
11924 tcg_temp_free_i64(tcg_op2);
11925 }
11926 } else {
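/* 8 + 8 -> 16 or 16 + 16 -> 32: the addlp helpers pairwise-add
 * every element pair within one 64-bit lane at a time, and the
 * accumulate step below adds the double-width elements packed
 * in each 64-bit lane the same way.
 */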
11927 for (pass = 0; pass < maxpass; pass++) {
11928 TCGv_i64 tcg_op = tcg_temp_new_i64();
11929 NeonGenOne64OpFn *genfn;
11930 static NeonGenOne64OpFn * const fns[2][2] = {
11931 { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
11932 { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
11933 };
11934
11935 genfn = fns[size][u];
11936
11937 tcg_res[pass] = tcg_temp_new_i64();
11938
11939 read_vec_element(s, tcg_op, rn, pass, MO_64);
11940 genfn(tcg_res[pass], tcg_op);
11941
11942 if (accum) {
11943 read_vec_element(s, tcg_op, rd, pass, MO_64);
11944 if (size == 0) {
11945 gen_helper_neon_addl_u16(tcg_res[pass],
11946 tcg_res[pass], tcg_op);
11947 } else {
11948 gen_helper_neon_addl_u32(tcg_res[pass],
11949 tcg_res[pass], tcg_op);
11950 }
11951 }
11952 tcg_temp_free_i64(tcg_op);
11953 }
11954 }
11955 if (!is_q) {
11956 tcg_res[1] = tcg_const_i64(0);
11957 }
11958 for (pass = 0; pass < 2; pass++) {
11959 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11960 tcg_temp_free_i64(tcg_res[pass]);
11961 }
11962 }
11963
11964 static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
11965 {
11966 /* Implement SHLL and SHLL2 */
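/* Each 32-bit chunk of the source is widened to 64 bits (so every
 * element doubles in width) and then shifted left by the element
 * size; 'part' selects the high half of the source for SHLL2.
 */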
11967 int pass;
11968 int part = is_q ? 2 : 0;
11969 TCGv_i64 tcg_res[2];
11970
11971 for (pass = 0; pass < 2; pass++) {
11972 static NeonGenWidenFn * const widenfns[3] = {
11973 gen_helper_neon_widen_u8,
11974 gen_helper_neon_widen_u16,
11975 tcg_gen_extu_i32_i64,
11976 };
11977 NeonGenWidenFn *widenfn = widenfns[size];
11978 TCGv_i32 tcg_op = tcg_temp_new_i32();
11979
11980 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
11981 tcg_res[pass] = tcg_temp_new_i64();
11982 widenfn(tcg_res[pass], tcg_op);
11983 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
11984
11985 tcg_temp_free_i32(tcg_op);
11986 }
11987
11988 for (pass = 0; pass < 2; pass++) {
11989 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11990 tcg_temp_free_i64(tcg_res[pass]);
11991 }
11992 }
11993
11994 /* AdvSIMD two reg misc
11995 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
11996 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11997 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
11998 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11999 */
12000 static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
12001 {
12002 int size = extract32(insn, 22, 2);
12003 int opcode = extract32(insn, 12, 5);
12004 bool u = extract32(insn, 29, 1);
12005 bool is_q = extract32(insn, 30, 1);
12006 int rn = extract32(insn, 5, 5);
12007 int rd = extract32(insn, 0, 5);
12008 bool need_fpstatus = false;
12009 bool need_rmode = false;
12010 int rmode = -1;
12011 TCGv_i32 tcg_rmode;
12012 TCGv_ptr tcg_fpstatus;
12013
12014 switch (opcode) {
12015 case 0x0: /* REV64, REV32 */
12016 case 0x1: /* REV16 */
12017 handle_rev(s, opcode, u, is_q, size, rn, rd);
12018 return;
12019 case 0x5: /* CNT, NOT, RBIT */
12020 if (u && size == 0) {
12021 /* NOT */
12022 break;
12023 } else if (u && size == 1) {
12024 /* RBIT */
12025 break;
12026 } else if (!u && size == 0) {
12027 /* CNT */
12028 break;
12029 }
12030 unallocated_encoding(s);
12031 return;
12032 case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
12033 case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
12034 if (size == 3) {
12035 unallocated_encoding(s);
12036 return;
12037 }
12038 if (!fp_access_check(s)) {
12039 return;
12040 }
12041
12042 handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
12043 return;
12044 case 0x4: /* CLS, CLZ */
12045 if (size == 3) {
12046 unallocated_encoding(s);
12047 return;
12048 }
12049 break;
12050 case 0x2: /* SADDLP, UADDLP */
12051 case 0x6: /* SADALP, UADALP */
12052 if (size == 3) {
12053 unallocated_encoding(s);
12054 return;
12055 }
12056 if (!fp_access_check(s)) {
12057 return;
12058 }
12059 handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
12060 return;
12061 case 0x13: /* SHLL, SHLL2 */
12062 if (u == 0 || size == 3) {
12063 unallocated_encoding(s);
12064 return;
12065 }
12066 if (!fp_access_check(s)) {
12067 return;
12068 }
12069 handle_shll(s, is_q, size, rn, rd);
12070 return;
12071 case 0xa: /* CMLT */
12072 if (u == 1) {
12073 unallocated_encoding(s);
12074 return;
12075 }
12076 /* fall through */
12077 case 0x8: /* CMGT, CMGE */
12078 case 0x9: /* CMEQ, CMLE */
12079 case 0xb: /* ABS, NEG */
12080 if (size == 3 && !is_q) {
12081 unallocated_encoding(s);
12082 return;
12083 }
12084 break;
12085 case 0x3: /* SUQADD, USQADD */
12086 if (size == 3 && !is_q) {
12087 unallocated_encoding(s);
12088 return;
12089 }
12090 if (!fp_access_check(s)) {
12091 return;
12092 }
12093 handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
12094 return;
12095 case 0x7: /* SQABS, SQNEG */
12096 if (size == 3 && !is_q) {
12097 unallocated_encoding(s);
12098 return;
12099 }
12100 break;
12101 case 0xc ... 0xf:
12102 case 0x16 ... 0x1f:
12103 {
12104 /* Floating point: U, size[1] and opcode indicate operation;
12105 * size[0] indicates single or double precision.
12106 */
12107 int is_double = extract32(size, 0, 1);
12108 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
12109 size = is_double ? 3 : 2;
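/* e.g. FNEG: U=1, size<1>=1, opcode=0xf combine to 0x6f below. */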
12110 switch (opcode) {
12111 case 0x2f: /* FABS */
12112 case 0x6f: /* FNEG */
12113 if (size == 3 && !is_q) {
12114 unallocated_encoding(s);
12115 return;
12116 }
12117 break;
12118 case 0x1d: /* SCVTF */
12119 case 0x5d: /* UCVTF */
12120 {
12121 bool is_signed = (opcode == 0x1d);
12122 int elements = is_double ? 2 : is_q ? 4 : 2;
12123 if (is_double && !is_q) {
12124 unallocated_encoding(s);
12125 return;
12126 }
12127 if (!fp_access_check(s)) {
12128 return;
12129 }
12130 handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
12131 return;
12132 }
12133 case 0x2c: /* FCMGT (zero) */
12134 case 0x2d: /* FCMEQ (zero) */
12135 case 0x2e: /* FCMLT (zero) */
12136 case 0x6c: /* FCMGE (zero) */
12137 case 0x6d: /* FCMLE (zero) */
12138 if (size == 3 && !is_q) {
12139 unallocated_encoding(s);
12140 return;
12141 }
12142 handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
12143 return;
12144 case 0x7f: /* FSQRT */
12145 if (size == 3 && !is_q) {
12146 unallocated_encoding(s);
12147 return;
12148 }
12149 break;
12150 case 0x1a: /* FCVTNS */
12151 case 0x1b: /* FCVTMS */
12152 case 0x3a: /* FCVTPS */
12153 case 0x3b: /* FCVTZS */
12154 case 0x5a: /* FCVTNU */
12155 case 0x5b: /* FCVTMU */
12156 case 0x7a: /* FCVTPU */
12157 case 0x7b: /* FCVTZU */
12158 need_fpstatus = true;
12159 need_rmode = true;
12160 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
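/* opcode<5> and opcode<0> together select the rounding mode:
 * 0 = TIEEVEN, 1 = POSINF, 2 = NEGINF, 3 = ZERO, matching the
 * FPROUNDING_* enumeration (e.g. FCVTMS 0x1b -> FPROUNDING_NEGINF).
 * The same mapping is reused for the FRINT* cases below.
 */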
12161 if (size == 3 && !is_q) {
12162 unallocated_encoding(s);
12163 return;
12164 }
12165 break;
12166 case 0x5c: /* FCVTAU */
12167 case 0x1c: /* FCVTAS */
12168 need_fpstatus = true;
12169 need_rmode = true;
12170 rmode = FPROUNDING_TIEAWAY;
12171 if (size == 3 && !is_q) {
12172 unallocated_encoding(s);
12173 return;
12174 }
12175 break;
12176 case 0x3c: /* URECPE */
12177 if (size == 3) {
12178 unallocated_encoding(s);
12179 return;
12180 }
12181 /* fall through */
12182 case 0x3d: /* FRECPE */
12183 case 0x7d: /* FRSQRTE */
12184 if (size == 3 && !is_q) {
12185 unallocated_encoding(s);
12186 return;
12187 }
12188 if (!fp_access_check(s)) {
12189 return;
12190 }
12191 handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
12192 return;
12193 case 0x56: /* FCVTXN, FCVTXN2 */
12194 if (size == 2) {
12195 unallocated_encoding(s);
12196 return;
12197 }
12198 /* fall through */
12199 case 0x16: /* FCVTN, FCVTN2 */
12200 /* handle_2misc_narrow does a 2*size -> size operation, but these
12201 * instructions encode the source size rather than dest size.
12202 */
12203 if (!fp_access_check(s)) {
12204 return;
12205 }
12206 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
12207 return;
12208 case 0x17: /* FCVTL, FCVTL2 */
12209 if (!fp_access_check(s)) {
12210 return;
12211 }
12212 handle_2misc_widening(s, opcode, is_q, size, rn, rd);
12213 return;
12214 case 0x18: /* FRINTN */
12215 case 0x19: /* FRINTM */
12216 case 0x38: /* FRINTP */
12217 case 0x39: /* FRINTZ */
12218 need_rmode = true;
12219 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
12220 /* fall through */
12221 case 0x59: /* FRINTX */
12222 case 0x79: /* FRINTI */
12223 need_fpstatus = true;
12224 if (size == 3 && !is_q) {
12225 unallocated_encoding(s);
12226 return;
12227 }
12228 break;
12229 case 0x58: /* FRINTA */
12230 need_rmode = true;
12231 rmode = FPROUNDING_TIEAWAY;
12232 need_fpstatus = true;
12233 if (size == 3 && !is_q) {
12234 unallocated_encoding(s);
12235 return;
12236 }
12237 break;
12238 case 0x7c: /* URSQRTE */
12239 if (size == 3) {
12240 unallocated_encoding(s);
12241 return;
12242 }
12243 break;
12244 case 0x1e: /* FRINT32Z */
12245 case 0x1f: /* FRINT64Z */
12246 need_rmode = true;
12247 rmode = FPROUNDING_ZERO;
12248 /* fall through */
12249 case 0x5e: /* FRINT32X */
12250 case 0x5f: /* FRINT64X */
12251 need_fpstatus = true;
12252 if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
12253 unallocated_encoding(s);
12254 return;
12255 }
12256 break;
12257 default:
12258 unallocated_encoding(s);
12259 return;
12260 }
12261 break;
12262 }
12263 default:
12264 unallocated_encoding(s);
12265 return;
12266 }
12267
12268 if (!fp_access_check(s)) {
12269 return;
12270 }
12271
12272 if (need_fpstatus || need_rmode) {
12273 tcg_fpstatus = get_fpstatus_ptr(false);
12274 } else {
12275 tcg_fpstatus = NULL;
12276 }
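/* gen_helper_set_rmode installs the new rounding mode and hands
 * back the old one in tcg_rmode; the second call at the end of
 * this function restores the original mode.
 */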
12277 if (need_rmode) {
12278 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
12279 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12280 } else {
12281 tcg_rmode = NULL;
12282 }
12283
12284 switch (opcode) {
12285 case 0x5:
12286 if (u && size == 0) { /* NOT */
12287 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
12288 return;
12289 }
12290 break;
12291 case 0x8: /* CMGT, CMGE */
12292 if (u) {
12293 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
12294 } else {
12295 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
12296 }
12297 return;
12298 case 0x9: /* CMEQ, CMLE */
12299 if (u) {
12300 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
12301 } else {
12302 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
12303 }
12304 return;
12305 case 0xa: /* CMLT */
12306 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
12307 return;
12308 case 0xb:
12309 if (u) { /* ABS, NEG */
12310 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
12311 } else {
12312 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
12313 }
12314 return;
12315 }
12316
12317 if (size == 3) {
12318 /* All 64-bit element operations can be shared with scalar 2misc */
12319 int pass;
12320
12321 /* Coverity claims (size == 3 && !is_q) has been eliminated
12322 * from all paths leading to here.
12323 */
12324 tcg_debug_assert(is_q);
12325 for (pass = 0; pass < 2; pass++) {
12326 TCGv_i64 tcg_op = tcg_temp_new_i64();
12327 TCGv_i64 tcg_res = tcg_temp_new_i64();
12328
12329 read_vec_element(s, tcg_op, rn, pass, MO_64);
12330
12331 handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
12332 tcg_rmode, tcg_fpstatus);
12333
12334 write_vec_element(s, tcg_res, rd, pass, MO_64);
12335
12336 tcg_temp_free_i64(tcg_res);
12337 tcg_temp_free_i64(tcg_op);
12338 }
12339 } else {
12340 int pass;
12341
12342 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
12343 TCGv_i32 tcg_op = tcg_temp_new_i32();
12344 TCGv_i32 tcg_res = tcg_temp_new_i32();
12345
12346 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
12347
12348 if (size == 2) {
12349 /* Special cases for 32 bit elements */
12350 switch (opcode) {
12351 case 0x4: /* CLS */
12352 if (u) {
12353 tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
12354 } else {
12355 tcg_gen_clrsb_i32(tcg_res, tcg_op);
12356 }
12357 break;
12358 case 0x7: /* SQABS, SQNEG */
12359 if (u) {
12360 gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
12361 } else {
12362 gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
12363 }
12364 break;
12365 case 0x2f: /* FABS */
12366 gen_helper_vfp_abss(tcg_res, tcg_op);
12367 break;
12368 case 0x6f: /* FNEG */
12369 gen_helper_vfp_negs(tcg_res, tcg_op);
12370 break;
12371 case 0x7f: /* FSQRT */
12372 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
12373 break;
12374 case 0x1a: /* FCVTNS */
12375 case 0x1b: /* FCVTMS */
12376 case 0x1c: /* FCVTAS */
12377 case 0x3a: /* FCVTPS */
12378 case 0x3b: /* FCVTZS */
12379 {
12380 TCGv_i32 tcg_shift = tcg_const_i32(0);
12381 gen_helper_vfp_tosls(tcg_res, tcg_op,
12382 tcg_shift, tcg_fpstatus);
12383 tcg_temp_free_i32(tcg_shift);
12384 break;
12385 }
12386 case 0x5a: /* FCVTNU */
12387 case 0x5b: /* FCVTMU */
12388 case 0x5c: /* FCVTAU */
12389 case 0x7a: /* FCVTPU */
12390 case 0x7b: /* FCVTZU */
12391 {
12392 TCGv_i32 tcg_shift = tcg_const_i32(0);
12393 gen_helper_vfp_touls(tcg_res, tcg_op,
12394 tcg_shift, tcg_fpstatus);
12395 tcg_temp_free_i32(tcg_shift);
12396 break;
12397 }
12398 case 0x18: /* FRINTN */
12399 case 0x19: /* FRINTM */
12400 case 0x38: /* FRINTP */
12401 case 0x39: /* FRINTZ */
12402 case 0x58: /* FRINTA */
12403 case 0x79: /* FRINTI */
12404 gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
12405 break;
12406 case 0x59: /* FRINTX */
12407 gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
12408 break;
12409 case 0x7c: /* URSQRTE */
12410 gen_helper_rsqrte_u32(tcg_res, tcg_op);
12411 break;
12412 case 0x1e: /* FRINT32Z */
12413 case 0x5e: /* FRINT32X */
12414 gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
12415 break;
12416 case 0x1f: /* FRINT64Z */
12417 case 0x5f: /* FRINT64X */
12418 gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
12419 break;
12420 default:
12421 g_assert_not_reached();
12422 }
12423 } else {
12424 /* Use helpers for 8 and 16 bit elements */
12425 switch (opcode) {
12426 case 0x5: /* CNT, RBIT */
12427 /* For these two insns size is part of the opcode specifier
12428 * (handled earlier); they always operate on byte elements.
12429 */
12430 if (u) {
12431 gen_helper_neon_rbit_u8(tcg_res, tcg_op);
12432 } else {
12433 gen_helper_neon_cnt_u8(tcg_res, tcg_op);
12434 }
12435 break;
12436 case 0x7: /* SQABS, SQNEG */
12437 {
12438 NeonGenOneOpEnvFn *genfn;
12439 static NeonGenOneOpEnvFn * const fns[2][2] = {
12440 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
12441 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
12442 };
12443 genfn = fns[size][u];
12444 genfn(tcg_res, cpu_env, tcg_op);
12445 break;
12446 }
12447 case 0x4: /* CLS, CLZ */
12448 if (u) {
12449 if (size == 0) {
12450 gen_helper_neon_clz_u8(tcg_res, tcg_op);
12451 } else {
12452 gen_helper_neon_clz_u16(tcg_res, tcg_op);
12453 }
12454 } else {
12455 if (size == 0) {
12456 gen_helper_neon_cls_s8(tcg_res, tcg_op);
12457 } else {
12458 gen_helper_neon_cls_s16(tcg_res, tcg_op);
12459 }
12460 }
12461 break;
12462 default:
12463 g_assert_not_reached();
12464 }
12465 }
12466
12467 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
12468
12469 tcg_temp_free_i32(tcg_res);
12470 tcg_temp_free_i32(tcg_op);
12471 }
12472 }
12473 clear_vec_high(s, is_q, rd);
12474
12475 if (need_rmode) {
12476 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12477 tcg_temp_free_i32(tcg_rmode);
12478 }
12479 if (need_fpstatus) {
12480 tcg_temp_free_ptr(tcg_fpstatus);
12481 }
12482 }
12483
12484 /* AdvSIMD [scalar] two register miscellaneous (FP16)
12485 *
12486 * 31 30 29 28 27 24 23 22 21 17 16 12 11 10 9 5 4 0
12487 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12488 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 | Rn | Rd |
12489 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12490 * mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
12491 * val: 0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
12492 *
12493 * This actually covers two groups where scalar access is governed by
12494 * bit 28. A bunch of the instructions (float to integral) only exist
12495 * in the vector form and are unallocated for the scalar decode. Also
12496 * in the scalar decode Q is always 1.
12497 */
12498 static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
12499 {
12500 int fpop, opcode, a, u;
12501 int rn, rd;
12502 bool is_q;
12503 bool is_scalar;
12504 bool only_in_vector = false;
12505
12506 int pass;
12507 TCGv_i32 tcg_rmode = NULL;
12508 TCGv_ptr tcg_fpstatus = NULL;
12509 bool need_rmode = false;
12510 bool need_fpst = true;
12511 int rmode;
12512
12513 if (!dc_isar_feature(aa64_fp16, s)) {
12514 unallocated_encoding(s);
12515 return;
12516 }
12517
12518 rd = extract32(insn, 0, 5);
12519 rn = extract32(insn, 5, 5);
12520
12521 a = extract32(insn, 23, 1);
12522 u = extract32(insn, 29, 1);
12523 is_scalar = extract32(insn, 28, 1);
12524 is_q = extract32(insn, 30, 1);
12525
12526 opcode = extract32(insn, 12, 5);
12527 fpop = deposit32(opcode, 5, 1, a);
12528 fpop = deposit32(fpop, 6, 1, u);
12529
12533 switch (fpop) {
12534 case 0x1d: /* SCVTF */
12535 case 0x5d: /* UCVTF */
12536 {
12537 int elements;
12538
12539 if (is_scalar) {
12540 elements = 1;
12541 } else {
12542 elements = (is_q ? 8 : 4);
12543 }
12544
12545 if (!fp_access_check(s)) {
12546 return;
12547 }
12548 handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
12549 return;
12550 }
12551 break;
12552 case 0x2c: /* FCMGT (zero) */
12553 case 0x2d: /* FCMEQ (zero) */
12554 case 0x2e: /* FCMLT (zero) */
12555 case 0x6c: /* FCMGE (zero) */
12556 case 0x6d: /* FCMLE (zero) */
12557 handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
12558 return;
12559 case 0x3d: /* FRECPE */
12560 case 0x3f: /* FRECPX */
12561 break;
12562 case 0x18: /* FRINTN */
12563 need_rmode = true;
12564 only_in_vector = true;
12565 rmode = FPROUNDING_TIEEVEN;
12566 break;
12567 case 0x19: /* FRINTM */
12568 need_rmode = true;
12569 only_in_vector = true;
12570 rmode = FPROUNDING_NEGINF;
12571 break;
12572 case 0x38: /* FRINTP */
12573 need_rmode = true;
12574 only_in_vector = true;
12575 rmode = FPROUNDING_POSINF;
12576 break;
12577 case 0x39: /* FRINTZ */
12578 need_rmode = true;
12579 only_in_vector = true;
12580 rmode = FPROUNDING_ZERO;
12581 break;
12582 case 0x58: /* FRINTA */
12583 need_rmode = true;
12584 only_in_vector = true;
12585 rmode = FPROUNDING_TIEAWAY;
12586 break;
12587 case 0x59: /* FRINTX */
12588 case 0x79: /* FRINTI */
12589 only_in_vector = true;
12590 /* current rounding mode */
12591 break;
12592 case 0x1a: /* FCVTNS */
12593 need_rmode = true;
12594 rmode = FPROUNDING_TIEEVEN;
12595 break;
12596 case 0x1b: /* FCVTMS */
12597 need_rmode = true;
12598 rmode = FPROUNDING_NEGINF;
12599 break;
12600 case 0x1c: /* FCVTAS */
12601 need_rmode = true;
12602 rmode = FPROUNDING_TIEAWAY;
12603 break;
12604 case 0x3a: /* FCVTPS */
12605 need_rmode = true;
12606 rmode = FPROUNDING_POSINF;
12607 break;
12608 case 0x3b: /* FCVTZS */
12609 need_rmode = true;
12610 rmode = FPROUNDING_ZERO;
12611 break;
12612 case 0x5a: /* FCVTNU */
12613 need_rmode = true;
12614 rmode = FPROUNDING_TIEEVEN;
12615 break;
12616 case 0x5b: /* FCVTMU */
12617 need_rmode = true;
12618 rmode = FPROUNDING_NEGINF;
12619 break;
12620 case 0x5c: /* FCVTAU */
12621 need_rmode = true;
12622 rmode = FPROUNDING_TIEAWAY;
12623 break;
12624 case 0x7a: /* FCVTPU */
12625 need_rmode = true;
12626 rmode = FPROUNDING_POSINF;
12627 break;
12628 case 0x7b: /* FCVTZU */
12629 need_rmode = true;
12630 rmode = FPROUNDING_ZERO;
12631 break;
12632 case 0x2f: /* FABS */
12633 case 0x6f: /* FNEG */
12634 need_fpst = false;
12635 break;
12636 case 0x7d: /* FRSQRTE */
12637 case 0x7f: /* FSQRT (vector) */
12638 break;
12639 default:
12640 fprintf(stderr, "%s: insn %#04x fpop %#2x\n", __func__, insn, fpop);
12641 g_assert_not_reached();
12642 }
12643
12644
12645 /* Check additional constraints for the scalar encoding */
12646 if (is_scalar) {
12647 if (!is_q) {
12648 unallocated_encoding(s);
12649 return;
12650 }
12651 /* FRINTxx is only in the vector form */
12652 if (only_in_vector) {
12653 unallocated_encoding(s);
12654 return;
12655 }
12656 }
12657
12658 if (!fp_access_check(s)) {
12659 return;
12660 }
12661
12662 if (need_rmode || need_fpst) {
12663 tcg_fpstatus = get_fpstatus_ptr(true);
12664 }
12665
12666 if (need_rmode) {
12667 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
12668 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12669 }
12670
12671 if (is_scalar) {
12672 TCGv_i32 tcg_op = read_fp_hreg(s, rn);
12673 TCGv_i32 tcg_res = tcg_temp_new_i32();
12674
12675 switch (fpop) {
12676 case 0x1a: /* FCVTNS */
12677 case 0x1b: /* FCVTMS */
12678 case 0x1c: /* FCVTAS */
12679 case 0x3a: /* FCVTPS */
12680 case 0x3b: /* FCVTZS */
12681 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12682 break;
12683 case 0x3d: /* FRECPE */
12684 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12685 break;
12686 case 0x3f: /* FRECPX */
12687 gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
12688 break;
12689 case 0x5a: /* FCVTNU */
12690 case 0x5b: /* FCVTMU */
12691 case 0x5c: /* FCVTAU */
12692 case 0x7a: /* FCVTPU */
12693 case 0x7b: /* FCVTZU */
12694 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12695 break;
12696 case 0x6f: /* FNEG */
12697 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12698 break;
12699 case 0x7d: /* FRSQRTE */
12700 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12701 break;
12702 default:
12703 g_assert_not_reached();
12704 }
12705
12706 /* limit any sign extension going on */
12707 tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
12708 write_fp_sreg(s, rd, tcg_res);
12709
12710 tcg_temp_free_i32(tcg_res);
12711 tcg_temp_free_i32(tcg_op);
12712 } else {
12713 for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
12714 TCGv_i32 tcg_op = tcg_temp_new_i32();
12715 TCGv_i32 tcg_res = tcg_temp_new_i32();
12716
12717 read_vec_element_i32(s, tcg_op, rn, pass, MO_16);
12718
12719 switch (fpop) {
12720 case 0x1a: /* FCVTNS */
12721 case 0x1b: /* FCVTMS */
12722 case 0x1c: /* FCVTAS */
12723 case 0x3a: /* FCVTPS */
12724 case 0x3b: /* FCVTZS */
12725 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12726 break;
12727 case 0x3d: /* FRECPE */
12728 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12729 break;
12730 case 0x5a: /* FCVTNU */
12731 case 0x5b: /* FCVTMU */
12732 case 0x5c: /* FCVTAU */
12733 case 0x7a: /* FCVTPU */
12734 case 0x7b: /* FCVTZU */
12735 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12736 break;
12737 case 0x18: /* FRINTN */
12738 case 0x19: /* FRINTM */
12739 case 0x38: /* FRINTP */
12740 case 0x39: /* FRINTZ */
12741 case 0x58: /* FRINTA */
12742 case 0x79: /* FRINTI */
12743 gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
12744 break;
12745 case 0x59: /* FRINTX */
12746 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
12747 break;
12748 case 0x2f: /* FABS */
12749 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
12750 break;
12751 case 0x6f: /* FNEG */
12752 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12753 break;
12754 case 0x7d: /* FRSQRTE */
12755 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12756 break;
12757 case 0x7f: /* FSQRT */
12758 gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
12759 break;
12760 default:
12761 g_assert_not_reached();
12762 }
12763
12764 write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
12765
12766 tcg_temp_free_i32(tcg_res);
12767 tcg_temp_free_i32(tcg_op);
12768 }
12769
12770 clear_vec_high(s, is_q, rd);
12771 }
12772
12773 if (tcg_rmode) {
12774 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12775 tcg_temp_free_i32(tcg_rmode);
12776 }
12777
12778 if (tcg_fpstatus) {
12779 tcg_temp_free_ptr(tcg_fpstatus);
12780 }
12781 }
12782
12783 /* AdvSIMD scalar x indexed element
12784 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
12785 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12786 * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
12787 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12788 * AdvSIMD vector x indexed element
12789 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
12790 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12791 * | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
12792 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12793 */
12794 static void disas_simd_indexed(DisasContext *s, uint32_t insn)
12795 {
12796 /* This encoding has two kinds of instruction:
12797 * normal, where we perform elt x idxelt => elt for each
12798 * element in the vector
12799 * long, where we perform elt x idxelt and generate a result of
12800 * double the width of the input element
12801 * The long ops have a 'part' specifier (i.e. they come in INSN, INSN2 pairs).
12802 */
12803 bool is_scalar = extract32(insn, 28, 1);
12804 bool is_q = extract32(insn, 30, 1);
12805 bool u = extract32(insn, 29, 1);
12806 int size = extract32(insn, 22, 2);
12807 int l = extract32(insn, 21, 1);
12808 int m = extract32(insn, 20, 1);
12809 /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
12810 int rm = extract32(insn, 16, 4);
12811 int opcode = extract32(insn, 12, 4);
12812 int h = extract32(insn, 11, 1);
12813 int rn = extract32(insn, 5, 5);
12814 int rd = extract32(insn, 0, 5);
12815 bool is_long = false;
12816 int is_fp = 0;
12817 bool is_fp16 = false;
12818 int index;
12819 TCGv_ptr fpst;
12820
12821 switch (16 * u + opcode) {
12822 case 0x08: /* MUL */
12823 case 0x10: /* MLA */
12824 case 0x14: /* MLS */
12825 if (is_scalar) {
12826 unallocated_encoding(s);
12827 return;
12828 }
12829 break;
12830 case 0x02: /* SMLAL, SMLAL2 */
12831 case 0x12: /* UMLAL, UMLAL2 */
12832 case 0x06: /* SMLSL, SMLSL2 */
12833 case 0x16: /* UMLSL, UMLSL2 */
12834 case 0x0a: /* SMULL, SMULL2 */
12835 case 0x1a: /* UMULL, UMULL2 */
12836 if (is_scalar) {
12837 unallocated_encoding(s);
12838 return;
12839 }
12840 is_long = true;
12841 break;
12842 case 0x03: /* SQDMLAL, SQDMLAL2 */
12843 case 0x07: /* SQDMLSL, SQDMLSL2 */
12844 case 0x0b: /* SQDMULL, SQDMULL2 */
12845 is_long = true;
12846 break;
12847 case 0x0c: /* SQDMULH */
12848 case 0x0d: /* SQRDMULH */
12849 break;
12850 case 0x01: /* FMLA */
12851 case 0x05: /* FMLS */
12852 case 0x09: /* FMUL */
12853 case 0x19: /* FMULX */
12854 is_fp = 1;
12855 break;
12856 case 0x1d: /* SQRDMLAH */
12857 case 0x1f: /* SQRDMLSH */
12858 if (!dc_isar_feature(aa64_rdm, s)) {
12859 unallocated_encoding(s);
12860 return;
12861 }
12862 break;
12863 case 0x0e: /* SDOT */
12864 case 0x1e: /* UDOT */
12865 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
12866 unallocated_encoding(s);
12867 return;
12868 }
12869 break;
12870 case 0x11: /* FCMLA #0 */
12871 case 0x13: /* FCMLA #90 */
12872 case 0x15: /* FCMLA #180 */
12873 case 0x17: /* FCMLA #270 */
12874 if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
12875 unallocated_encoding(s);
12876 return;
12877 }
12878 is_fp = 2;
12879 break;
12880 case 0x00: /* FMLAL */
12881 case 0x04: /* FMLSL */
12882 case 0x18: /* FMLAL2 */
12883 case 0x1c: /* FMLSL2 */
12884 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
12885 unallocated_encoding(s);
12886 return;
12887 }
12888 size = MO_16;
12889 /* is_fp, but we pass cpu_env not fp_status. */
12890 break;
12891 default:
12892 unallocated_encoding(s);
12893 return;
12894 }
12895
12896 switch (is_fp) {
12897 case 1: /* normal fp */
12898 /* convert insn encoded size to MemOp size */
12899 switch (size) {
12900 case 0: /* half-precision */
12901 size = MO_16;
12902 is_fp16 = true;
12903 break;
12904 case MO_32: /* single precision */
12905 case MO_64: /* double precision */
12906 break;
12907 default:
12908 unallocated_encoding(s);
12909 return;
12910 }
12911 break;
12912
12913 case 2: /* complex fp */
12914 /* Each indexable element is a complex pair. */
12915 size += 1;
12916 switch (size) {
12917 case MO_32:
12918 if (h && !is_q) {
12919 unallocated_encoding(s);
12920 return;
12921 }
12922 is_fp16 = true;
12923 break;
12924 case MO_64:
12925 break;
12926 default:
12927 unallocated_encoding(s);
12928 return;
12929 }
12930 break;
12931
12932 default: /* integer */
12933 switch (size) {
12934 case MO_8:
12935 case MO_64:
12936 unallocated_encoding(s);
12937 return;
12938 }
12939 break;
12940 }
12941 if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
12942 unallocated_encoding(s);
12943 return;
12944 }
12945
12946 /* Given MemOp size, adjust register and indexing. */
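/* For MO_16 the index is H:L:M (eight halfword positions) and Rm
 * stays 4 bits, so only V0-V15 are addressable; for MO_32/MO_64
 * the M bit instead extends Rm to a full 5-bit register number.
 */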
12947 switch (size) {
12948 case MO_16:
12949 index = h << 2 | l << 1 | m;
12950 break;
12951 case MO_32:
12952 index = h << 1 | l;
12953 rm |= m << 4;
12954 break;
12955 case MO_64:
12956 if (l || !is_q) {
12957 unallocated_encoding(s);
12958 return;
12959 }
12960 index = h;
12961 rm |= m << 4;
12962 break;
12963 default:
12964 g_assert_not_reached();
12965 }
12966
12967 if (!fp_access_check(s)) {
12968 return;
12969 }
12970
12971 if (is_fp) {
12972 fpst = get_fpstatus_ptr(is_fp16);
12973 } else {
12974 fpst = NULL;
12975 }
12976
12977 switch (16 * u + opcode) {
12978 case 0x0e: /* SDOT */
12979 case 0x1e: /* UDOT */
12980 gen_gvec_op3_ool(s, is_q, rd, rn, rm, index,
12981 u ? gen_helper_gvec_udot_idx_b
12982 : gen_helper_gvec_sdot_idx_b);
12983 return;
12984 case 0x11: /* FCMLA #0 */
12985 case 0x13: /* FCMLA #90 */
12986 case 0x15: /* FCMLA #180 */
12987 case 0x17: /* FCMLA #270 */
12988 {
12989 int rot = extract32(insn, 13, 2);
12990 int data = (index << 2) | rot;
12991 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
12992 vec_full_reg_offset(s, rn),
12993 vec_full_reg_offset(s, rm), fpst,
12994 is_q ? 16 : 8, vec_full_reg_size(s), data,
12995 size == MO_64
12996 ? gen_helper_gvec_fcmlas_idx
12997 : gen_helper_gvec_fcmlah_idx);
12998 tcg_temp_free_ptr(fpst);
12999 }
13000 return;
13001
13002 case 0x00: /* FMLAL */
13003 case 0x04: /* FMLSL */
13004 case 0x18: /* FMLAL2 */
13005 case 0x1c: /* FMLSL2 */
13006 {
13007 int is_s = extract32(opcode, 2, 1);
13008 int is_2 = u;
13009 int data = (index << 2) | (is_2 << 1) | is_s;
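/* Pack the immediate for the out-of-line helper to unpack:
 * bit 0 selects the subtracting (FMLSL) forms, bit 1 the '2'
 * (high-half) forms, and the upper bits carry the element index.
 */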
13010 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
13011 vec_full_reg_offset(s, rn),
13012 vec_full_reg_offset(s, rm), cpu_env,
13013 is_q ? 16 : 8, vec_full_reg_size(s),
13014 data, gen_helper_gvec_fmlal_idx_a64);
13015 }
13016 return;
13017 }
13018
13019 if (size == 3) {
13020 TCGv_i64 tcg_idx = tcg_temp_new_i64();
13021 int pass;
13022
13023 assert(is_fp && is_q && !is_long);
13024
13025 read_vec_element(s, tcg_idx, rm, index, MO_64);
13026
13027 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13028 TCGv_i64 tcg_op = tcg_temp_new_i64();
13029 TCGv_i64 tcg_res = tcg_temp_new_i64();
13030
13031 read_vec_element(s, tcg_op, rn, pass, MO_64);
13032
13033 switch (16 * u + opcode) {
13034 case 0x05: /* FMLS */
13035 /* As usual for ARM, separate negation for fused multiply-add */
13036 gen_helper_vfp_negd(tcg_op, tcg_op);
13037 /* fall through */
13038 case 0x01: /* FMLA */
13039 read_vec_element(s, tcg_res, rd, pass, MO_64);
13040 gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
13041 break;
13042 case 0x09: /* FMUL */
13043 gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
13044 break;
13045 case 0x19: /* FMULX */
13046 gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
13047 break;
13048 default:
13049 g_assert_not_reached();
13050 }
13051
13052 write_vec_element(s, tcg_res, rd, pass, MO_64);
13053 tcg_temp_free_i64(tcg_op);
13054 tcg_temp_free_i64(tcg_res);
13055 }
13056
13057 tcg_temp_free_i64(tcg_idx);
13058 clear_vec_high(s, !is_scalar, rd);
13059 } else if (!is_long) {
13060 /* 32 bit floating point, or 16 or 32 bit integer.
13061 * For the 16 bit scalar case we use the usual Neon helpers and
13062 * rely on the fact that 0 op 0 == 0 with no side effects.
13063 */
13064 TCGv_i32 tcg_idx = tcg_temp_new_i32();
13065 int pass, maxpasses;
13066
13067 if (is_scalar) {
13068 maxpasses = 1;
13069 } else {
13070 maxpasses = is_q ? 4 : 2;
13071 }
13072
13073 read_vec_element_i32(s, tcg_idx, rm, index, size);
13074
13075 if (size == 1 && !is_scalar) {
13076 /* The simplest way to handle the 16x16 indexed ops is to duplicate
13077 * the index into both halves of the 32 bit tcg_idx and then use
13078 * the usual Neon helpers.
13079 */
13080 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13081 }
13082
13083 for (pass = 0; pass < maxpasses; pass++) {
13084 TCGv_i32 tcg_op = tcg_temp_new_i32();
13085 TCGv_i32 tcg_res = tcg_temp_new_i32();
13086
13087 read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
13088
13089 switch (16 * u + opcode) {
13090 case 0x08: /* MUL */
13091 case 0x10: /* MLA */
13092 case 0x14: /* MLS */
13093 {
13094 static NeonGenTwoOpFn * const fns[2][2] = {
13095 { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
13096 { tcg_gen_add_i32, tcg_gen_sub_i32 },
13097 };
13098 NeonGenTwoOpFn *genfn;
13099 bool is_sub = opcode == 0x4;
13100
13101 if (size == 1) {
13102 gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
13103 } else {
13104 tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
13105 }
13106 if (opcode == 0x8) {
13107 break;
13108 }
13109 read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
13110 genfn = fns[size - 1][is_sub];
13111 genfn(tcg_res, tcg_op, tcg_res);
13112 break;
13113 }
13114 case 0x05: /* FMLS */
13115 case 0x01: /* FMLA */
13116 read_vec_element_i32(s, tcg_res, rd, pass,
13117 is_scalar ? size : MO_32);
13118 switch (size) {
13119 case 1:
13120 if (opcode == 0x5) {
13121 /* As usual for ARM, separate negation for fused
13122 * multiply-add */
13123 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
13124 }
13125 if (is_scalar) {
13126 gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
13127 tcg_res, fpst);
13128 } else {
13129 gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
13130 tcg_res, fpst);
13131 }
13132 break;
13133 case 2:
13134 if (opcode == 0x5) {
13135 /* As usual for ARM, separate negation for
13136 * fused multiply-add */
13137 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
13138 }
13139 gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
13140 tcg_res, fpst);
13141 break;
13142 default:
13143 g_assert_not_reached();
13144 }
13145 break;
13146 case 0x09: /* FMUL */
13147 switch (size) {
13148 case 1:
13149 if (is_scalar) {
13150 gen_helper_advsimd_mulh(tcg_res, tcg_op,
13151 tcg_idx, fpst);
13152 } else {
13153 gen_helper_advsimd_mul2h(tcg_res, tcg_op,
13154 tcg_idx, fpst);
13155 }
13156 break;
13157 case 2:
13158 gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
13159 break;
13160 default:
13161 g_assert_not_reached();
13162 }
13163 break;
13164 case 0x19: /* FMULX */
13165 switch (size) {
13166 case 1:
13167 if (is_scalar) {
13168 gen_helper_advsimd_mulxh(tcg_res, tcg_op,
13169 tcg_idx, fpst);
13170 } else {
13171 gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
13172 tcg_idx, fpst);
13173 }
13174 break;
13175 case 2:
13176 gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
13177 break;
13178 default:
13179 g_assert_not_reached();
13180 }
13181 break;
13182 case 0x0c: /* SQDMULH */
13183 if (size == 1) {
13184 gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
13185 tcg_op, tcg_idx);
13186 } else {
13187 gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
13188 tcg_op, tcg_idx);
13189 }
13190 break;
13191 case 0x0d: /* SQRDMULH */
13192 if (size == 1) {
13193 gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
13194 tcg_op, tcg_idx);
13195 } else {
13196 gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
13197 tcg_op, tcg_idx);
13198 }
13199 break;
13200 case 0x1d: /* SQRDMLAH */
13201 read_vec_element_i32(s, tcg_res, rd, pass,
13202 is_scalar ? size : MO_32);
13203 if (size == 1) {
13204 gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
13205 tcg_op, tcg_idx, tcg_res);
13206 } else {
13207 gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
13208 tcg_op, tcg_idx, tcg_res);
13209 }
13210 break;
13211 case 0x1f: /* SQRDMLSH */
13212 read_vec_element_i32(s, tcg_res, rd, pass,
13213 is_scalar ? size : MO_32);
13214 if (size == 1) {
13215 gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
13216 tcg_op, tcg_idx, tcg_res);
13217 } else {
13218 gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
13219 tcg_op, tcg_idx, tcg_res);
13220 }
13221 break;
13222 default:
13223 g_assert_not_reached();
13224 }
13225
13226 if (is_scalar) {
13227 write_fp_sreg(s, rd, tcg_res);
13228 } else {
13229 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
13230 }
13231
13232 tcg_temp_free_i32(tcg_op);
13233 tcg_temp_free_i32(tcg_res);
13234 }
13235
13236 tcg_temp_free_i32(tcg_idx);
13237 clear_vec_high(s, is_q, rd);
13238 } else {
13239 /* long ops: 16x16->32 or 32x32->64 */
13240 TCGv_i64 tcg_res[2];
13241 int pass;
13242 bool satop = extract32(opcode, 0, 1);
13243 MemOp memop = MO_32;
13244
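/* Sign-extend the 32-bit operands for the signed multiplies
 * (u == 0) and for all of the saturating-doubling ops, which
 * are signed by definition.
 */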
13245 if (satop || !u) {
13246 memop |= MO_SIGN;
13247 }
13248
13249 if (size == 2) {
13250 TCGv_i64 tcg_idx = tcg_temp_new_i64();
13251
13252 read_vec_element(s, tcg_idx, rm, index, memop);
13253
13254 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13255 TCGv_i64 tcg_op = tcg_temp_new_i64();
13256 TCGv_i64 tcg_passres;
13257 int passelt;
13258
13259 if (is_scalar) {
13260 passelt = 0;
13261 } else {
13262 passelt = pass + (is_q * 2);
13263 }
13264
13265 read_vec_element(s, tcg_op, rn, passelt, memop);
13266
13267 tcg_res[pass] = tcg_temp_new_i64();
13268
13269 if (opcode == 0xa || opcode == 0xb) {
13270 /* Non-accumulating ops */
13271 tcg_passres = tcg_res[pass];
13272 } else {
13273 tcg_passres = tcg_temp_new_i64();
13274 }
13275
13276 tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
13277 tcg_temp_free_i64(tcg_op);
13278
13279 if (satop) {
13280 /* saturating, doubling */
13281 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
13282 tcg_passres, tcg_passres);
13283 }
13284
13285 if (opcode == 0xa || opcode == 0xb) {
13286 continue;
13287 }
13288
13289 /* Accumulating op: handle accumulate step */
13290 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13291
13292 switch (opcode) {
13293 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13294 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13295 break;
13296 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13297 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13298 break;
13299 case 0x7: /* SQDMLSL, SQDMLSL2 */
13300 tcg_gen_neg_i64(tcg_passres, tcg_passres);
13301 /* fall through */
13302 case 0x3: /* SQDMLAL, SQDMLAL2 */
13303 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
13304 tcg_res[pass],
13305 tcg_passres);
13306 break;
13307 default:
13308 g_assert_not_reached();
13309 }
13310 tcg_temp_free_i64(tcg_passres);
13311 }
13312 tcg_temp_free_i64(tcg_idx);
13313
13314 clear_vec_high(s, !is_scalar, rd);
13315 } else {
13316 TCGv_i32 tcg_idx = tcg_temp_new_i32();
13317
13318 assert(size == 1);
13319 read_vec_element_i32(s, tcg_idx, rm, index, size);
13320
13321 if (!is_scalar) {
13322 /* The simplest way to handle the 16x16 indexed ops is to
13323 * duplicate the index into both halves of the 32 bit tcg_idx
13324 * and then use the usual Neon helpers.
13325 */
13326 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13327 }
13328
13329 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13330 TCGv_i32 tcg_op = tcg_temp_new_i32();
13331 TCGv_i64 tcg_passres;
13332
13333 if (is_scalar) {
13334 read_vec_element_i32(s, tcg_op, rn, pass, size);
13335 } else {
13336 read_vec_element_i32(s, tcg_op, rn,
13337 pass + (is_q * 2), MO_32);
13338 }
13339
13340 tcg_res[pass] = tcg_temp_new_i64();
13341
13342 if (opcode == 0xa || opcode == 0xb) {
13343 /* Non-accumulating ops */
13344 tcg_passres = tcg_res[pass];
13345 } else {
13346 tcg_passres = tcg_temp_new_i64();
13347 }
13348
13349 if (memop & MO_SIGN) {
13350 gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
13351 } else {
13352 gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
13353 }
13354 if (satop) {
13355 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
13356 tcg_passres, tcg_passres);
13357 }
13358 tcg_temp_free_i32(tcg_op);
13359
13360 if (opcode == 0xa || opcode == 0xb) {
13361 continue;
13362 }
13363
13364 /* Accumulating op: handle accumulate step */
13365 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13366
13367 switch (opcode) {
13368 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13369 gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
13370 tcg_passres);
13371 break;
13372 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13373 gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
13374 tcg_passres);
13375 break;
13376 case 0x7: /* SQDMLSL, SQDMLSL2 */
13377 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
13378 /* fall through */
13379 case 0x3: /* SQDMLAL, SQDMLAL2 */
13380 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
13381 tcg_res[pass],
13382 tcg_passres);
13383 break;
13384 default:
13385 g_assert_not_reached();
13386 }
13387 tcg_temp_free_i64(tcg_passres);
13388 }
13389 tcg_temp_free_i32(tcg_idx);
13390
13391 if (is_scalar) {
13392 tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
13393 }
13394 }
13395
13396 if (is_scalar) {
13397 tcg_res[1] = tcg_const_i64(0);
13398 }
13399
13400 for (pass = 0; pass < 2; pass++) {
13401 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13402 tcg_temp_free_i64(tcg_res[pass]);
13403 }
13404 }
13405
13406 if (fpst) {
13407 tcg_temp_free_ptr(fpst);
13408 }
13409 }
13410
13411 /* Crypto AES
13412 * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
13413 * +-----------------+------+-----------+--------+-----+------+------+
13414 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
13415 * +-----------------+------+-----------+--------+-----+------+------+
13416 */
13417 static void disas_crypto_aes(DisasContext *s, uint32_t insn)
13418 {
13419 int size = extract32(insn, 22, 2);
13420 int opcode = extract32(insn, 12, 5);
13421 int rn = extract32(insn, 5, 5);
13422 int rd = extract32(insn, 0, 5);
13423 int decrypt;
13424 gen_helper_gvec_2 *genfn2 = NULL;
13425 gen_helper_gvec_3 *genfn3 = NULL;
13426
13427 if (!dc_isar_feature(aa64_aes, s) || size != 0) {
13428 unallocated_encoding(s);
13429 return;
13430 }
13431
13432 switch (opcode) {
13433 case 0x4: /* AESE */
13434 decrypt = 0;
13435 genfn3 = gen_helper_crypto_aese;
13436 break;
13437 case 0x6: /* AESMC */
13438 decrypt = 0;
13439 genfn2 = gen_helper_crypto_aesmc;
13440 break;
13441 case 0x5: /* AESD */
13442 decrypt = 1;
13443 genfn3 = gen_helper_crypto_aese;
13444 break;
13445 case 0x7: /* AESIMC */
13446 decrypt = 1;
13447 genfn2 = gen_helper_crypto_aesmc;
13448 break;
13449 default:
13450 unallocated_encoding(s);
13451 return;
13452 }
13453
13454 if (!fp_access_check(s)) {
13455 return;
13456 }
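/* AESE/AESD first XOR the state (Rd) with the round key (Rn), so
 * the destination is also a source operand and they take the
 * three-operand path; AESMC/AESIMC only read Rn.
 */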
13457 if (genfn2) {
13458 gen_gvec_op2_ool(s, true, rd, rn, decrypt, genfn2);
13459 } else {
13460 gen_gvec_op3_ool(s, true, rd, rd, rn, decrypt, genfn3);
13461 }
13462 }
13463
13464 /* Crypto three-reg SHA
13465 * 31 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
13466 * +-----------------+------+---+------+---+--------+-----+------+------+
13467 * | 0 1 0 1 1 1 1 0 | size | 0 | Rm | 0 | opcode | 0 0 | Rn | Rd |
13468 * +-----------------+------+---+------+---+--------+-----+------+------+
13469 */
13470 static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
13471 {
13472 int size = extract32(insn, 22, 2);
13473 int opcode = extract32(insn, 12, 3);
13474 int rm = extract32(insn, 16, 5);
13475 int rn = extract32(insn, 5, 5);
13476 int rd = extract32(insn, 0, 5);
13477 gen_helper_gvec_3 *genfn;
13478 bool feature;
13479
13480 if (size != 0) {
13481 unallocated_encoding(s);
13482 return;
13483 }
13484
13485 switch (opcode) {
13486 case 0: /* SHA1C */
13487 genfn = gen_helper_crypto_sha1c;
13488 feature = dc_isar_feature(aa64_sha1, s);
13489 break;
13490 case 1: /* SHA1P */
13491 genfn = gen_helper_crypto_sha1p;
13492 feature = dc_isar_feature(aa64_sha1, s);
13493 break;
13494 case 2: /* SHA1M */
13495 genfn = gen_helper_crypto_sha1m;
13496 feature = dc_isar_feature(aa64_sha1, s);
13497 break;
13498 case 3: /* SHA1SU0 */
13499 genfn = gen_helper_crypto_sha1su0;
13500 feature = dc_isar_feature(aa64_sha1, s);
13501 break;
13502 case 4: /* SHA256H */
13503 genfn = gen_helper_crypto_sha256h;
13504 feature = dc_isar_feature(aa64_sha256, s);
13505 break;
13506 case 5: /* SHA256H2 */
13507 genfn = gen_helper_crypto_sha256h2;
13508 feature = dc_isar_feature(aa64_sha256, s);
13509 break;
13510 case 6: /* SHA256SU1 */
13511 genfn = gen_helper_crypto_sha256su1;
13512 feature = dc_isar_feature(aa64_sha256, s);
13513 break;
13514 default:
13515 unallocated_encoding(s);
13516 return;
13517 }
13518
13519 if (!feature) {
13520 unallocated_encoding(s);
13521 return;
13522 }
13523
13524 if (!fp_access_check(s)) {
13525 return;
13526 }
13527 gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
13528 }
13529
13530 /* Crypto two-reg SHA
13531 * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
13532 * +-----------------+------+-----------+--------+-----+------+------+
13533 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
13534 * +-----------------+------+-----------+--------+-----+------+------+
13535 */
13536 static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
13537 {
13538 int size = extract32(insn, 22, 2);
13539 int opcode = extract32(insn, 12, 5);
13540 int rn = extract32(insn, 5, 5);
13541 int rd = extract32(insn, 0, 5);
13542 gen_helper_gvec_2 *genfn;
13543 bool feature;
13544
13545 if (size != 0) {
13546 unallocated_encoding(s);
13547 return;
13548 }
13549
13550 switch (opcode) {
13551 case 0: /* SHA1H */
13552 feature = dc_isar_feature(aa64_sha1, s);
13553 genfn = gen_helper_crypto_sha1h;
13554 break;
13555 case 1: /* SHA1SU1 */
13556 feature = dc_isar_feature(aa64_sha1, s);
13557 genfn = gen_helper_crypto_sha1su1;
13558 break;
13559 case 2: /* SHA256SU0 */
13560 feature = dc_isar_feature(aa64_sha256, s);
13561 genfn = gen_helper_crypto_sha256su0;
13562 break;
13563 default:
13564 unallocated_encoding(s);
13565 return;
13566 }
13567
13568 if (!feature) {
13569 unallocated_encoding(s);
13570 return;
13571 }
13572
13573 if (!fp_access_check(s)) {
13574 return;
13575 }
13576 gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
13577 }
13578
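/* RAX1: d = n ^ rol64(m, 1), from the SHA3 extension. Provided
 * both as an i64 expansion and as a vector expansion so the gvec
 * machinery can pick whichever the host supports.
 */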
13579 static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
13580 {
13581 tcg_gen_rotli_i64(d, m, 1);
13582 tcg_gen_xor_i64(d, d, n);
13583 }
13584
13585 static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
13586 {
13587 tcg_gen_rotli_vec(vece, d, m, 1);
13588 tcg_gen_xor_vec(vece, d, d, n);
13589 }
13590
13591 void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
13592 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
13593 {
13594 static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
13595 static const GVecGen3 op = {
13596 .fni8 = gen_rax1_i64,
13597 .fniv = gen_rax1_vec,
13598 .opt_opc = vecop_list,
13599 .fno = gen_helper_crypto_rax1,
13600 .vece = MO_64,
13601 };
13602 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
13603 }
13604
13605 /* Crypto three-reg SHA512
13606 * 31 21 20 16 15 14 13 12 11 10 9 5 4 0
13607 * +-----------------------+------+---+---+-----+--------+------+------+
13608 * | 1 1 0 0 1 1 1 0 0 1 1 | Rm | 1 | O | 0 0 | opcode | Rn | Rd |
13609 * +-----------------------+------+---+---+-----+--------+------+------+
13610 */
13611 static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
13612 {
13613 int opcode = extract32(insn, 10, 2);
13614 int o = extract32(insn, 14, 1);
13615 int rm = extract32(insn, 16, 5);
13616 int rn = extract32(insn, 5, 5);
13617 int rd = extract32(insn, 0, 5);
13618 bool feature;
13619 gen_helper_gvec_3 *oolfn = NULL;
13620 GVecGen3Fn *gvecfn = NULL;
13621
13622 if (o == 0) {
13623 switch (opcode) {
13624 case 0: /* SHA512H */
13625 feature = dc_isar_feature(aa64_sha512, s);
13626 oolfn = gen_helper_crypto_sha512h;
13627 break;
13628 case 1: /* SHA512H2 */
13629 feature = dc_isar_feature(aa64_sha512, s);
13630 oolfn = gen_helper_crypto_sha512h2;
13631 break;
13632 case 2: /* SHA512SU1 */
13633 feature = dc_isar_feature(aa64_sha512, s);
13634 oolfn = gen_helper_crypto_sha512su1;
13635 break;
13636 case 3: /* RAX1 */
13637 feature = dc_isar_feature(aa64_sha3, s);
13638 gvecfn = gen_gvec_rax1;
13639 break;
13640 default:
13641 g_assert_not_reached();
13642 }
13643 } else {
13644 switch (opcode) {
13645 case 0: /* SM3PARTW1 */
13646 feature = dc_isar_feature(aa64_sm3, s);
13647 oolfn = gen_helper_crypto_sm3partw1;
13648 break;
13649 case 1: /* SM3PARTW2 */
13650 feature = dc_isar_feature(aa64_sm3, s);
13651 oolfn = gen_helper_crypto_sm3partw2;
13652 break;
13653 case 2: /* SM4EKEY */
13654 feature = dc_isar_feature(aa64_sm4, s);
13655 oolfn = gen_helper_crypto_sm4ekey;
13656 break;
13657 default:
13658 unallocated_encoding(s);
13659 return;
13660 }
13661 }
13662
13663 if (!feature) {
13664 unallocated_encoding(s);
13665 return;
13666 }
13667
13668 if (!fp_access_check(s)) {
13669 return;
13670 }
13671
13672 if (oolfn) {
13673 gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
13674 } else {
13675 gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
13676 }
13677 }
13678
13679 /* Crypto two-reg SHA512
13680 * 31 12 11 10 9 5 4 0
13681 * +-----------------------------------------+--------+------+------+
13682 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode | Rn | Rd |
13683 * +-----------------------------------------+--------+------+------+
13684 */
13685 static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
13686 {
13687 int opcode = extract32(insn, 10, 2);
13688 int rn = extract32(insn, 5, 5);
13689 int rd = extract32(insn, 0, 5);
13690 bool feature;
13691
13692 switch (opcode) {
13693 case 0: /* SHA512SU0 */
13694 feature = dc_isar_feature(aa64_sha512, s);
13695 break;
13696 case 1: /* SM4E */
13697 feature = dc_isar_feature(aa64_sm4, s);
13698 break;
13699 default:
13700 unallocated_encoding(s);
13701 return;
13702 }
13703
13704 if (!feature) {
13705 unallocated_encoding(s);
13706 return;
13707 }
13708
13709 if (!fp_access_check(s)) {
13710 return;
13711 }
13712
13713 switch (opcode) {
13714 case 0: /* SHA512SU0 */
13715 gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
13716 break;
13717 case 1: /* SM4E */
13718 gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
13719 break;
13720 default:
13721 g_assert_not_reached();
13722 }
13723 }
13724
13725 /* Crypto four-register
13726 * 31 23 22 21 20 16 15 14 10 9 5 4 0
13727 * +-------------------+-----+------+---+------+------+------+
13728 * | 1 1 0 0 1 1 1 0 0 | Op0 | Rm | 0 | Ra | Rn | Rd |
13729 * +-------------------+-----+------+---+------+------+------+
13730 */
13731 static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
13732 {
13733 int op0 = extract32(insn, 21, 2);
13734 int rm = extract32(insn, 16, 5);
13735 int ra = extract32(insn, 10, 5);
13736 int rn = extract32(insn, 5, 5);
13737 int rd = extract32(insn, 0, 5);
13738 bool feature;
13739
13740 switch (op0) {
13741 case 0: /* EOR3 */
13742 case 1: /* BCAX */
13743 feature = dc_isar_feature(aa64_sha3, s);
13744 break;
13745 case 2: /* SM3SS1 */
13746 feature = dc_isar_feature(aa64_sm3, s);
13747 break;
13748 default:
13749 unallocated_encoding(s);
13750 return;
13751 }
13752
13753 if (!feature) {
13754 unallocated_encoding(s);
13755 return;
13756 }
13757
13758 if (!fp_access_check(s)) {
13759 return;
13760 }
13761
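/*
 * Per 64-bit lane: EOR3 computes Vd = Vn ^ Vm ^ Va and BCAX computes
 * Vd = Vn ^ (Vm & ~Va); below, tcg_op1/2/3 hold the Rn/Rm/Ra elements
 * respectively.
 */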
13762 if (op0 < 2) {
13763 TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
13764 int pass;
13765
13766 tcg_op1 = tcg_temp_new_i64();
13767 tcg_op2 = tcg_temp_new_i64();
13768 tcg_op3 = tcg_temp_new_i64();
13769 tcg_res[0] = tcg_temp_new_i64();
13770 tcg_res[1] = tcg_temp_new_i64();
13771
13772 for (pass = 0; pass < 2; pass++) {
13773 read_vec_element(s, tcg_op1, rn, pass, MO_64);
13774 read_vec_element(s, tcg_op2, rm, pass, MO_64);
13775 read_vec_element(s, tcg_op3, ra, pass, MO_64);
13776
13777 if (op0 == 0) {
13778 /* EOR3 */
13779 tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
13780 } else {
13781 /* BCAX */
13782 tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
13783 }
13784 tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
13785 }
13786 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
13787 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
13788
13789 tcg_temp_free_i64(tcg_op1);
13790 tcg_temp_free_i64(tcg_op2);
13791 tcg_temp_free_i64(tcg_op3);
13792 tcg_temp_free_i64(tcg_res[0]);
13793 tcg_temp_free_i64(tcg_res[1]);
13794 } else {
13795 TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;
13796
13797 tcg_op1 = tcg_temp_new_i32();
13798 tcg_op2 = tcg_temp_new_i32();
13799 tcg_op3 = tcg_temp_new_i32();
13800 tcg_res = tcg_temp_new_i32();
13801 tcg_zero = tcg_const_i32(0);
13802
13803 read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
13804 read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
13805 read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);
13806
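/*
 * SM3SS1 places rol32(rol32(Vn[3], 12) + Vm[3] + Va[3], 7) in lane 3
 * and zeroes lanes 0-2; the rotri by 20 and 25 below are the
 * right-rotate equivalents of rol32 by 12 and 7.
 */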
13807 tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
13808 tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
13809 tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
13810 tcg_gen_rotri_i32(tcg_res, tcg_res, 25);
13811
13812 write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
13813 write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
13814 write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
13815 write_vec_element_i32(s, tcg_res, rd, 3, MO_32);
13816
13817 tcg_temp_free_i32(tcg_op1);
13818 tcg_temp_free_i32(tcg_op2);
13819 tcg_temp_free_i32(tcg_op3);
13820 tcg_temp_free_i32(tcg_res);
13821 tcg_temp_free_i32(tcg_zero);
13822 }
13823 }
13824
13825 /* Crypto XAR
13826 * 31 21 20 16 15 10 9 5 4 0
13827 * +-----------------------+------+--------+------+------+
13828 * | 1 1 0 0 1 1 1 0 1 0 0 | Rm | imm6 | Rn | Rd |
13829 * +-----------------------+------+--------+------+------+
13830 */
13831 static void disas_crypto_xar(DisasContext *s, uint32_t insn)
13832 {
13833 int rm = extract32(insn, 16, 5);
13834 int imm6 = extract32(insn, 10, 6);
13835 int rn = extract32(insn, 5, 5);
13836 int rd = extract32(insn, 0, 5);
13837 TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
13838 int pass;
13839
13840 if (!dc_isar_feature(aa64_sha3, s)) {
13841 unallocated_encoding(s);
13842 return;
13843 }
13844
13845 if (!fp_access_check(s)) {
13846 return;
13847 }
13848
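/*
 * Per 64-bit lane, XAR computes Vd = ror64(Vn ^ Vm, imm6);
 * with imm6 == 0 this degenerates to a plain EOR.
 */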
13849 tcg_op1 = tcg_temp_new_i64();
13850 tcg_op2 = tcg_temp_new_i64();
13851 tcg_res[0] = tcg_temp_new_i64();
13852 tcg_res[1] = tcg_temp_new_i64();
13853
13854 for (pass = 0; pass < 2; pass++) {
13855 read_vec_element(s, tcg_op1, rn, pass, MO_64);
13856 read_vec_element(s, tcg_op2, rm, pass, MO_64);
13857
13858 tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
13859 tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
13860 }
13861 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
13862 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
13863
13864 tcg_temp_free_i64(tcg_op1);
13865 tcg_temp_free_i64(tcg_op2);
13866 tcg_temp_free_i64(tcg_res[0]);
13867 tcg_temp_free_i64(tcg_res[1]);
13868 }
13869
13870 /* Crypto three-reg imm2
13871 * 31 21 20 16 15 14 13 12 11 10 9 5 4 0
13872 * +-----------------------+------+-----+------+--------+------+------+
13873 * | 1 1 0 0 1 1 1 0 0 1 0 | Rm | 1 0 | imm2 | opcode | Rn | Rd |
13874 * +-----------------------+------+-----+------+--------+------+------+
13875 */
13876 static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
13877 {
13878 static gen_helper_gvec_3 * const fns[4] = {
13879 gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
13880 gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
13881 };
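/*
 * opcode (bits 11:10) indexes the SM3TT variant directly, and imm2 is
 * forwarded as the gvec "data" argument, from which the helper selects
 * which 32-bit element of Vm to use.
 */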
13882 int opcode = extract32(insn, 10, 2);
13883 int imm2 = extract32(insn, 12, 2);
13884 int rm = extract32(insn, 16, 5);
13885 int rn = extract32(insn, 5, 5);
13886 int rd = extract32(insn, 0, 5);
13887
13888 if (!dc_isar_feature(aa64_sm3, s)) {
13889 unallocated_encoding(s);
13890 return;
13891 }
13892
13893 if (!fp_access_check(s)) {
13894 return;
13895 }
13896
13897 gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
13898 }
13899
13900 /* C3.6 Data processing - SIMD, inc Crypto
13901 *
13902 * As the decode gets a little complex, we are using a table-based
13903 * approach for this part of the decode.
13904 */
13905 static const AArch64DecodeTable data_proc_simd[] = {
13906 /* pattern , mask , fn */
13907 { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
13908 { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
13909 { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
13910 { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
13911 { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
13912 { 0x0e000400, 0x9fe08400, disas_simd_copy },
13913 { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
13914 /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
13915 { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
13916 { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
13917 { 0x0e000000, 0xbf208c00, disas_simd_tb },
13918 { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
13919 { 0x2e000000, 0xbf208400, disas_simd_ext },
13920 { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
13921 { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
13922 { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
13923 { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
13924 { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
13925 { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
13926 { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
13927 { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
13928 { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
13929 { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
13930 { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
13931 { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
13932 { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
13933 { 0xce000000, 0xff808000, disas_crypto_four_reg },
13934 { 0xce800000, 0xffe00000, disas_crypto_xar },
13935 { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
13936 { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
13937 { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
13938 { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
13939 { 0x00000000, 0x00000000, NULL }
13940 };
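/*
 * An insn matches a table entry when (insn & mask) == pattern, and the
 * first match wins -- hence the ordering constraint on simd_mod_imm
 * noted above.  A sketch of the scan (illustrative only; the actual
 * lookup_disas_fn used below may differ in detail):
 */
#if 0
static AArch64DecodeFn *lookup_sketch(const AArch64DecodeTable *tp,
                                      uint32_t insn)
{
    for (; tp->disas_fn; tp++) {          /* table is NULL-terminated */
        if ((insn & tp->mask) == tp->pattern) {
            return tp->disas_fn;
        }
    }
    return NULL;                          /* caller raises UNDEF */
}
#endif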
13941
13942 static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
13943 {
13944 /* Note that this is called with all non-FP cases from
13945 * table C3-6, so it must UNDEF for entries not specifically
13946 * allocated to instructions in that table.
13947 */
13948 AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
13949 if (fn) {
13950 fn(s, insn);
13951 } else {
13952 unallocated_encoding(s);
13953 }
13954 }
13955
13956 /* C3.6 Data processing - SIMD and floating point */
13957 static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
13958 {
13959 if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
13960 disas_data_proc_fp(s, insn);
13961 } else {
13962 /* SIMD, including crypto */
13963 disas_data_proc_simd(s, insn);
13964 }
13965 }
13966
13967 /**
13968 * is_guarded_page:
13969 * @env: The cpu environment
13970 * @s: The DisasContext
13971 *
13972 * Return true if the page is guarded.
13973 */
13974 static bool is_guarded_page(CPUARMState *env, DisasContext *s)
13975 {
13976 #ifdef CONFIG_USER_ONLY
13977 return false; /* FIXME */
13978 #else
13979 uint64_t addr = s->base.pc_first;
13980 int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
13981 unsigned int index = tlb_index(env, mmu_idx, addr);
13982 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
13983
13984 /*
13985 * We test this immediately after reading an insn, which means
13986 * that any normal page must be in the TLB. The only exception
13987 * would be for executing from flash or device memory, which
13988 * does not retain the TLB entry.
13989 *
13990 * FIXME: Assume false for those, for now. We could use
13991 * arm_cpu_get_phys_page_attrs_debug to re-read the page
13992 * table entry even for that case.
13993 */
13994 return (tlb_hit(entry->addr_code, addr) &&
13995 env_tlb(env)->d[mmu_idx].iotlb[index].attrs.target_tlb_bit0);
13996 #endif
13997 }
13998
13999 /**
14000 * btype_destination_ok:
14001 * @insn: The instruction at the branch destination
14002 * @bt: SCTLR_ELx.BT
14003 * @btype: PSTATE.BTYPE, which is known to be non-zero here
14004 *
14005 * On a guarded page, there are a limited number of insns
14006 * that may be present at the branch target:
14007 * - branch target identifiers,
14008 * - PACIASP, PACIBSP,
14009 * - BRK insn,
14010 * - HLT insn.
14011 * Anything else causes a Branch Target Exception.
14012 *
14013 * Return true if the branch is compatible, false to raise BTITRAP.
14014 */
14015 static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
14016 {
14017 if ((insn & 0xfffff01fu) == 0xd503201fu) {
14018 /* HINT space */
14019 switch (extract32(insn, 5, 7)) {
14020 case 0b011001: /* PACIASP */
14021 case 0b011011: /* PACIBSP */
14022 /*
14023 * If SCTLR_ELx.BT, then PACI*SP are not compatible
14024 * with btype == 3. Otherwise all btype are ok.
14025 */
14026 return !bt || btype != 3;
14027 case 0b100000: /* BTI */
14028 /* Not compatible with any btype. */
14029 return false;
14030 case 0b100010: /* BTI c */
14031 /* Not compatible with btype == 3 */
14032 return btype != 3;
14033 case 0b100100: /* BTI j */
14034 /* Not compatible with btype == 2 */
14035 return btype != 2;
14036 case 0b100110: /* BTI jc */
14037 /* Compatible with any btype. */
14038 return true;
14039 }
14040 } else {
14041 switch (insn & 0xffe0001fu) {
14042 case 0xd4200000u: /* BRK */
14043 case 0xd4400000u: /* HLT */
14044 /* Give priority to the breakpoint exception. */
14045 return true;
14046 }
14047 }
14048 return false;
14049 }
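/*
 * Note for the HINT-space check above: the 0xfffff01f mask keeps
 * everything except bits 11:5 (CRm:op2), so extract32(insn, 5, 7)
 * yields the HINT immediate; e.g. BTI c is HINT #34 == 0b0100010.
 */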
14050
14051 /* C3.1 A64 instruction index by encoding */
14052 static void disas_a64_insn(CPUARMState *env, DisasContext *s)
14053 {
14054 uint32_t insn;
14055
14056 s->pc_curr = s->base.pc_next;
14057 insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
14058 s->insn = insn;
14059 s->base.pc_next += 4;
14060
14061 s->fp_access_checked = false;
14062
14063 if (dc_isar_feature(aa64_bti, s)) {
14064 if (s->base.num_insns == 1) {
14065 /*
14066 * At the first insn of the TB, compute s->guarded_page.
14067 * We delayed computing this until successfully reading
14068 * the first insn of the TB, above. This (mostly) ensures
14069 * that the softmmu tlb entry has been populated, and the
14070 * page table GP bit is available.
14071 *
14072 * Note that we need to compute this even if btype == 0,
14073 * because this value is used for BR instructions later
14074 * where ENV is not available.
14075 */
14076 s->guarded_page = is_guarded_page(env, s);
14077
14078 /* First insn can have btype set to non-zero. */
14079 tcg_debug_assert(s->btype >= 0);
14080
14081 /*
14082 * Note that the Branch Target Exception has fairly high
14083 * priority -- below debugging exceptions but above almost
14084 * everything else. This allows us to handle it now
14085 * instead of waiting until the insn is otherwise decoded.
14086 */
14087 if (s->btype != 0
14088 && s->guarded_page
14089 && !btype_destination_ok(insn, s->bt, s->btype)) {
14090 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
14091 syn_btitrap(s->btype),
14092 default_exception_el(s));
14093 return;
14094 }
14095 } else {
14096 /* Not the first insn: btype must be 0. */
14097 tcg_debug_assert(s->btype == 0);
14098 }
14099 }
14100
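/*
 * Dispatch on insn[28:25], the major encoding group
 * (Arm ARM table C4-1).
 */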
14101 switch (extract32(insn, 25, 4)) {
14102 case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
14103 unallocated_encoding(s);
14104 break;
14105 case 0x2:
14106 if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) {
14107 unallocated_encoding(s);
14108 }
14109 break;
14110 case 0x8: case 0x9: /* Data processing - immediate */
14111 disas_data_proc_imm(s, insn);
14112 break;
14113 case 0xa: case 0xb: /* Branch, exception generation and system insns */
14114 disas_b_exc_sys(s, insn);
14115 break;
14116 case 0x4:
14117 case 0x6:
14118 case 0xc:
14119 case 0xe: /* Loads and stores */
14120 disas_ldst(s, insn);
14121 break;
14122 case 0x5:
14123 case 0xd: /* Data processing - register */
14124 disas_data_proc_reg(s, insn);
14125 break;
14126 case 0x7:
14127 case 0xf: /* Data processing - SIMD and floating point */
14128 disas_data_proc_simd_fp(s, insn);
14129 break;
14130 default:
14131 assert(FALSE); /* all 16 cases should be handled above */
14132 break;
14133 }
14134
14135 /* if we allocated any temporaries, free them here */
14136 free_tmp_a64(s);
14137
14138 /*
14139 * After execution of most insns, btype is reset to 0.
14140 * Note that we set btype == -1 when the insn sets btype.
14141 */
14142 if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
14143 reset_btype(s);
14144 }
14145 }
14146
14147 static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
14148 CPUState *cpu)
14149 {
14150 DisasContext *dc = container_of(dcbase, DisasContext, base);
14151 CPUARMState *env = cpu->env_ptr;
14152 ARMCPU *arm_cpu = env_archcpu(env);
14153 uint32_t tb_flags = dc->base.tb->flags;
14154 int bound, core_mmu_idx;
14155
14156 dc->isar = &arm_cpu->isar;
14157 dc->condjmp = 0;
14158
14159 dc->aarch64 = 1;
14160 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
14161 * there is no secure EL1, so we route exceptions to EL3.
14162 */
14163 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
14164 !arm_el_is_aa64(env, 3);
14165 dc->thumb = 0;
14166 dc->sctlr_b = 0;
14167 dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
14168 dc->condexec_mask = 0;
14169 dc->condexec_cond = 0;
14170 core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
14171 dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
14172 dc->tbii = FIELD_EX32(tb_flags, TBFLAG_A64, TBII);
14173 dc->tbid = FIELD_EX32(tb_flags, TBFLAG_A64, TBID);
14174 dc->tcma = FIELD_EX32(tb_flags, TBFLAG_A64, TCMA);
14175 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
14176 #if !defined(CONFIG_USER_ONLY)
14177 dc->user = (dc->current_el == 0);
14178 #endif
14179 dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
14180 dc->sve_excp_el = FIELD_EX32(tb_flags, TBFLAG_A64, SVEEXC_EL);
14181 dc->sve_len = (FIELD_EX32(tb_flags, TBFLAG_A64, ZCR_LEN) + 1) * 16;
14182 dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE);
14183 dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT);
14184 dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE);
14185 dc->unpriv = FIELD_EX32(tb_flags, TBFLAG_A64, UNPRIV);
14186 dc->ata = FIELD_EX32(tb_flags, TBFLAG_A64, ATA);
14187 dc->mte_active[0] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE_ACTIVE);
14188 dc->mte_active[1] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE0_ACTIVE);
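/*
 * mte_active[0] applies to normal accesses; mte_active[1] is consulted
 * for the unprivileged (LDTR/STTR-style) accesses tracked by
 * MTE0_ACTIVE.
 */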
14189 dc->vec_len = 0;
14190 dc->vec_stride = 0;
14191 dc->cp_regs = arm_cpu->cp_regs;
14192 dc->features = env->features;
14193
14194 /* Single step state. The code-generation logic here is:
14195 * SS_ACTIVE == 0:
14196 * generate code with no special handling for single-stepping (except
14197 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
14198 * this happens anyway because those changes are all system register or
14199 * PSTATE writes).
14200 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
14201 * emit code for one insn
14202 * emit code to clear PSTATE.SS
14203 * emit code to generate software step exception for completed step
14204 * end TB (as usual for having generated an exception)
14205 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
14206 * emit code to generate a software step exception
14207 * end the TB
14208 */
14209 dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
14210 dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
14211 dc->is_ldex = false;
14212 dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
14213
14214 /* Bound the number of insns to execute to those left on the page. */
14215 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
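/*
 * Worked example: with 4KiB pages and pc_first at page offset 0xff0,
 * -(pc_first | TARGET_PAGE_MASK) == 0x10 bytes remain, so at most
 * 4 insns are translated before crossing the page boundary.
 */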
14216
14217 /* If architectural single step active, limit to 1. */
14218 if (dc->ss_active) {
14219 bound = 1;
14220 }
14221 dc->base.max_insns = MIN(dc->base.max_insns, bound);
14222
14223 init_tmp_a64_array(dc);
14224 }
14225
14226 static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
14227 {
14228 }
14229
14230 static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
14231 {
14232 DisasContext *dc = container_of(dcbase, DisasContext, base);
14233
14234 tcg_gen_insn_start(dc->base.pc_next, 0, 0);
14235 dc->insn_start = tcg_last_op();
14236 }
14237
14238 static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
14239 const CPUBreakpoint *bp)
14240 {
14241 DisasContext *dc = container_of(dcbase, DisasContext, base);
14242
14243 if (bp->flags & BP_CPU) {
14244 gen_a64_set_pc_im(dc->base.pc_next);
14245 gen_helper_check_breakpoints(cpu_env);
14246 /* End the TB early; it likely won't be executed */
14247 dc->base.is_jmp = DISAS_TOO_MANY;
14248 } else {
14249 gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
14250 /* The address covered by the breakpoint must be
14251 included in [tb->pc, tb->pc + tb->size) in order
14252 for it to be properly cleared -- thus we
14253 increment the PC here so that the logic setting
14254 tb->size below does the right thing. */
14255 dc->base.pc_next += 4;
14256 dc->base.is_jmp = DISAS_NORETURN;
14257 }
14258
14259 return true;
14260 }
14261
14262 static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
14263 {
14264 DisasContext *dc = container_of(dcbase, DisasContext, base);
14265 CPUARMState *env = cpu->env_ptr;
14266
14267 if (dc->ss_active && !dc->pstate_ss) {
14268 /* Singlestep state is Active-pending.
14269 * If we're in this state at the start of a TB then either
14270 * a) we just took an exception to an EL which is being debugged
14271 * and this is the first insn in the exception handler
14272 * b) debug exceptions were masked and we just unmasked them
14273 * without changing EL (eg by clearing PSTATE.D)
14274 * In either case we're going to take a swstep exception in the
14275 * "did not step an insn" case, and so the syndrome ISV and EX
14276 * bits should be zero.
14277 */
14278 assert(dc->base.num_insns == 1);
14279 gen_swstep_exception(dc, 0, 0);
14280 dc->base.is_jmp = DISAS_NORETURN;
14281 } else {
14282 disas_a64_insn(env, dc);
14283 }
14284
14285 translator_loop_temp_check(&dc->base);
14286 }
14287
14288 static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
14289 {
14290 DisasContext *dc = container_of(dcbase, DisasContext, base);
14291
14292 if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
14293 /* Note that this means single-stepping WFI doesn't halt the CPU.
14294 * For conditional branch insns this is harmless unreachable code as
14295 * gen_goto_tb() has already handled emitting the debug exception
14296 * (and thus a tb-jump is not possible when singlestepping).
14297 */
14298 switch (dc->base.is_jmp) {
14299 default:
14300 gen_a64_set_pc_im(dc->base.pc_next);
14301 /* fall through */
14302 case DISAS_EXIT:
14303 case DISAS_JUMP:
14304 if (dc->base.singlestep_enabled) {
14305 gen_exception_internal(EXCP_DEBUG);
14306 } else {
14307 gen_step_complete_exception(dc);
14308 }
14309 break;
14310 case DISAS_NORETURN:
14311 break;
14312 }
14313 } else {
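/*
 * Normal (non-single-step) TB ends: NEXT/TOO_MANY chain directly to
 * the sequentially next TB, UPDATE_NOCHAIN/JUMP go via the TB lookup
 * helper, and UPDATE_EXIT/EXIT return to the main loop.
 */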
14314 switch (dc->base.is_jmp) {
14315 case DISAS_NEXT:
14316 case DISAS_TOO_MANY:
14317 gen_goto_tb(dc, 1, dc->base.pc_next);
14318 break;
14319 default:
14320 case DISAS_UPDATE_EXIT:
14321 gen_a64_set_pc_im(dc->base.pc_next);
14322 /* fall through */
14323 case DISAS_EXIT:
14324 tcg_gen_exit_tb(NULL, 0);
14325 break;
14326 case DISAS_UPDATE_NOCHAIN:
14327 gen_a64_set_pc_im(dc->base.pc_next);
14328 /* fall through */
14329 case DISAS_JUMP:
14330 tcg_gen_lookup_and_goto_ptr();
14331 break;
14332 case DISAS_NORETURN:
14333 case DISAS_SWI:
14334 break;
14335 case DISAS_WFE:
14336 gen_a64_set_pc_im(dc->base.pc_next);
14337 gen_helper_wfe(cpu_env);
14338 break;
14339 case DISAS_YIELD:
14340 gen_a64_set_pc_im(dc->base.pc_next);
14341 gen_helper_yield(cpu_env);
14342 break;
14343 case DISAS_WFI:
14344 {
14345 /* This is a special case because we don't want to just halt the CPU
14346 * if trying to debug across a WFI.
14347 */
14348 TCGv_i32 tmp = tcg_const_i32(4);
14349
14350 gen_a64_set_pc_im(dc->base.pc_next);
14351 gen_helper_wfi(cpu_env, tmp);
14352 tcg_temp_free_i32(tmp);
14353 /* The helper doesn't necessarily throw an exception, but we
14354 * must go back to the main loop to check for interrupts anyway.
14355 */
14356 tcg_gen_exit_tb(NULL, 0);
14357 break;
14358 }
14359 }
14360 }
14361 }
14362
14363 static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
14364 CPUState *cpu)
14365 {
14366 DisasContext *dc = container_of(dcbase, DisasContext, base);
14367
14368 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
14369 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
14370 }
14371
14372 const TranslatorOps aarch64_translator_ops = {
14373 .init_disas_context = aarch64_tr_init_disas_context,
14374 .tb_start = aarch64_tr_tb_start,
14375 .insn_start = aarch64_tr_insn_start,
14376 .breakpoint_check = aarch64_tr_breakpoint_check,
14377 .translate_insn = aarch64_tr_translate_insn,
14378 .tb_stop = aarch64_tr_tb_stop,
14379 .disas_log = aarch64_tr_disas_log,
14380 };