1 /*
2 * AArch64 translation
3 *
4 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20
21 #include "translate.h"
22 #include "translate-a64.h"
23 #include "qemu/log.h"
24 #include "disas/disas.h"
25 #include "arm_ldst.h"
26 #include "semihosting/semihost.h"
27 #include "cpregs.h"
28
29 static TCGv_i64 cpu_X[32];
30 static TCGv_i64 cpu_pc;
31
32 /* Load/store exclusive handling */
33 static TCGv_i64 cpu_exclusive_high;
34
35 static const char *regnames[] = {
36 "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
37 "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
38 "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
39 "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
40 };
41
42 enum a64_shift_type {
43 A64_SHIFT_TYPE_LSL = 0,
44 A64_SHIFT_TYPE_LSR = 1,
45 A64_SHIFT_TYPE_ASR = 2,
46 A64_SHIFT_TYPE_ROR = 3
47 };
48
49 /*
50 * Include the generated decoders.
51 */
52
53 #include "decode-sme-fa64.c.inc"
54 #include "decode-a64.c.inc"
55
56 /* Table based decoder typedefs - used when the relevant bits for decode
57 * are too awkwardly scattered across the instruction (eg SIMD).
58 */
59 typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);
60
61 typedef struct AArch64DecodeTable {
62 uint32_t pattern;
63 uint32_t mask;
64 AArch64DecodeFn *disas_fn;
65 } AArch64DecodeTable;
66
67 /* initialize TCG globals. */
68 void a64_translate_init(void)
69 {
70 int i;
71
72 cpu_pc = tcg_global_mem_new_i64(cpu_env,
73 offsetof(CPUARMState, pc),
74 "pc");
75 for (i = 0; i < 32; i++) {
76 cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
77 offsetof(CPUARMState, xregs[i]),
78 regnames[i]);
79 }
80
81 cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
82 offsetof(CPUARMState, exclusive_high), "exclusive_high");
83 }
84
85 /*
86 * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
87 */
88 static int get_a64_user_mem_index(DisasContext *s)
89 {
90 /*
91 * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
92 * which is the usual mmu_idx for this cpu state.
93 */
94 ARMMMUIdx useridx = s->mmu_idx;
95
96 if (s->unpriv) {
97 /*
98 * We have pre-computed the condition for AccType_UNPRIV.
99 * Therefore we should never get here with a mmu_idx for
100 * which we do not know the corresponding user mmu_idx.
101 */
102 switch (useridx) {
103 case ARMMMUIdx_E10_1:
104 case ARMMMUIdx_E10_1_PAN:
105 useridx = ARMMMUIdx_E10_0;
106 break;
107 case ARMMMUIdx_E20_2:
108 case ARMMMUIdx_E20_2_PAN:
109 useridx = ARMMMUIdx_E20_0;
110 break;
111 default:
112 g_assert_not_reached();
113 }
114 }
115 return arm_to_core_mmu_idx(useridx);
116 }
117
118 static void set_btype_raw(int val)
119 {
120 tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
121 offsetof(CPUARMState, btype));
122 }
123
124 static void set_btype(DisasContext *s, int val)
125 {
126 /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */
127 tcg_debug_assert(val >= 1 && val <= 3);
128 set_btype_raw(val);
129 s->btype = -1;
130 }
131
132 static void reset_btype(DisasContext *s)
133 {
134 if (s->btype != 0) {
135 set_btype_raw(0);
136 s->btype = 0;
137 }
138 }
139
140 static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
141 {
142 assert(s->pc_save != -1);
143 if (tb_cflags(s->base.tb) & CF_PCREL) {
144 tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
145 } else {
146 tcg_gen_movi_i64(dest, s->pc_curr + diff);
147 }
148 }
149
150 void gen_a64_update_pc(DisasContext *s, target_long diff)
151 {
152 gen_pc_plus_diff(s, cpu_pc, diff);
153 s->pc_save = s->pc_curr + diff;
154 }
155
156 /*
157 * Handle Top Byte Ignore (TBI) bits.
158 *
159 * If address tagging is enabled via the TCR TBI bits:
160 * + for EL2 and EL3 there is only one TBI bit, and if it is set
161 * then the address is zero-extended, clearing bits [63:56]
162 * + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
163 * and TBI1 controls addresses with bit 55 == 1.
164 * If the appropriate TBI bit is set for the address then
165 * the address is sign-extended from bit 55 into bits [63:56]
166 *
167 * Here we have concatenated TBI{1,0} into tbi.
168 */
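/*
 * As an illustration of the tbi == 1 case (TBI0 set, TBI1 clear):
 * an address with bit 55 clear, e.g. 0x5a00000012345678, has its top
 * byte stripped (giving 0x0000000012345678), while an address with
 * bit 55 set, e.g. 0x5a80000012345678, is returned unchanged, because
 * TBI1 does not apply and the AND below preserves the original top byte.
 */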
169 static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
170 TCGv_i64 src, int tbi)
171 {
172 if (tbi == 0) {
173 /* Load unmodified address */
174 tcg_gen_mov_i64(dst, src);
175 } else if (!regime_has_2_ranges(s->mmu_idx)) {
176 /* Force tag byte to all zero */
177 tcg_gen_extract_i64(dst, src, 0, 56);
178 } else {
179 /* Sign-extend from bit 55. */
180 tcg_gen_sextract_i64(dst, src, 0, 56);
181
182 switch (tbi) {
183 case 1:
184 /* tbi0 but !tbi1: only use the extension if positive */
185 tcg_gen_and_i64(dst, dst, src);
186 break;
187 case 2:
188 /* !tbi0 but tbi1: only use the extension if negative */
189 tcg_gen_or_i64(dst, dst, src);
190 break;
191 case 3:
192 /* tbi0 and tbi1: always use the extension */
193 break;
194 default:
195 g_assert_not_reached();
196 }
197 }
198 }
199
200 static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
201 {
202 /*
203 * If address tagging is enabled for instructions via the TCR TBI bits,
204 * then loading an address into the PC will clear out any tag.
205 */
206 gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
207 s->pc_save = -1;
208 }
209
210 /*
211 * Handle MTE and/or TBI.
212 *
213 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
214 * for the tag to be present in the FAR_ELx register. But for user-only
215 * mode we do not have a TLB with which to implement this, so we must
216 * remove the top byte now.
217 *
218 * Always return a fresh temporary that we can increment independently
219 * of the write-back address.
220 */
221
222 TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
223 {
224 TCGv_i64 clean = tcg_temp_new_i64();
225 #ifdef CONFIG_USER_ONLY
226 gen_top_byte_ignore(s, clean, addr, s->tbid);
227 #else
228 tcg_gen_mov_i64(clean, addr);
229 #endif
230 return clean;
231 }
232
233 /* Insert a zero allocation tag (bits [59:56]) into src, with the result at dst. */
234 static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
235 {
236 tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
237 }
238
239 static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
240 MMUAccessType acc, int log2_size)
241 {
242 gen_helper_probe_access(cpu_env, ptr,
243 tcg_constant_i32(acc),
244 tcg_constant_i32(get_mem_index(s)),
245 tcg_constant_i32(1 << log2_size));
246 }
247
248 /*
249 * For MTE, check a single logical or atomic access. This probes a single
250 * address, the exact one specified. The size and alignment of the access
251 * is not relevant to MTE, per se, but watchpoints do require the size,
252 * and we want to recognize those before making any other changes to state.
253 */
254 static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
255 bool is_write, bool tag_checked,
256 int log2_size, bool is_unpriv,
257 int core_idx)
258 {
259 if (tag_checked && s->mte_active[is_unpriv]) {
260 TCGv_i64 ret;
261 int desc = 0;
262
263 desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
264 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
265 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
266 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
267 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);
268
269 ret = tcg_temp_new_i64();
270 gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);
271
272 return ret;
273 }
274 return clean_data_tbi(s, addr);
275 }
276
277 TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
278 bool tag_checked, int log2_size)
279 {
280 return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size,
281 false, get_mem_index(s));
282 }
283
284 /*
285 * For MTE, check multiple logical sequential accesses.
286 */
287 TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
288 bool tag_checked, int size)
289 {
290 if (tag_checked && s->mte_active[0]) {
291 TCGv_i64 ret;
292 int desc = 0;
293
294 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
295 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
296 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
297 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
298 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
299
300 ret = tcg_temp_new_i64();
301 gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);
302
303 return ret;
304 }
305 return clean_data_tbi(s, addr);
306 }
307
308 typedef struct DisasCompare64 {
309 TCGCond cond;
310 TCGv_i64 value;
311 } DisasCompare64;
312
313 static void a64_test_cc(DisasCompare64 *c64, int cc)
314 {
315 DisasCompare c32;
316
317 arm_test_cc(&c32, cc);
318
319 /*
320 * Sign-extend the 32-bit value so that the GE/LT comparisons work
321 * properly. The NE/EQ comparisons are also fine with this choice.
322 */
323 c64->cond = c32.cond;
324 c64->value = tcg_temp_new_i64();
325 tcg_gen_ext_i32_i64(c64->value, c32.value);
326 }
327
328 static void gen_rebuild_hflags(DisasContext *s)
329 {
330 gen_helper_rebuild_hflags_a64(cpu_env, tcg_constant_i32(s->current_el));
331 }
332
333 static void gen_exception_internal(int excp)
334 {
335 assert(excp_is_internal(excp));
336 gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
337 }
338
339 static void gen_exception_internal_insn(DisasContext *s, int excp)
340 {
341 gen_a64_update_pc(s, 0);
342 gen_exception_internal(excp);
343 s->base.is_jmp = DISAS_NORETURN;
344 }
345
346 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
347 {
348 gen_a64_update_pc(s, 0);
349 gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syndrome));
350 s->base.is_jmp = DISAS_NORETURN;
351 }
352
353 static void gen_step_complete_exception(DisasContext *s)
354 {
355 /* We just completed a step of an insn. Move from Active-not-pending
356 * to Active-pending, and then also take the swstep exception.
357 * This corresponds to making the (IMPDEF) choice to prioritize
358 * swstep exceptions over asynchronous exceptions taken to an exception
359 * level where debug is disabled. This choice has the advantage that
360 * we do not need to maintain internal state corresponding to the
361 * ISV/EX syndrome bits between completion of the step and generation
362 * of the exception, and our syndrome information is always correct.
363 */
364 gen_ss_advance(s);
365 gen_swstep_exception(s, 1, s->is_ldex);
366 s->base.is_jmp = DISAS_NORETURN;
367 }
368
369 static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
370 {
371 if (s->ss_active) {
372 return false;
373 }
374 return translator_use_goto_tb(&s->base, dest);
375 }
376
377 static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
378 {
379 if (use_goto_tb(s, s->pc_curr + diff)) {
380 /*
381 * For pcrel, the pc must always be up-to-date on entry to
382 * the linked TB, so that it can use simple additions for all
383 * further adjustments. For !pcrel, the linked TB is compiled
384 * to know its full virtual address, so we can delay the
385 * update to pc to the unlinked path. A long chain of links
386 * can thus avoid many updates to the PC.
387 */
388 if (tb_cflags(s->base.tb) & CF_PCREL) {
389 gen_a64_update_pc(s, diff);
390 tcg_gen_goto_tb(n);
391 } else {
392 tcg_gen_goto_tb(n);
393 gen_a64_update_pc(s, diff);
394 }
395 tcg_gen_exit_tb(s->base.tb, n);
396 s->base.is_jmp = DISAS_NORETURN;
397 } else {
398 gen_a64_update_pc(s, diff);
399 if (s->ss_active) {
400 gen_step_complete_exception(s);
401 } else {
402 tcg_gen_lookup_and_goto_ptr();
403 s->base.is_jmp = DISAS_NORETURN;
404 }
405 }
406 }
407
408 /*
409 * Register access functions
410 *
411 * These functions are used for directly accessing a register in cases where
412 * changes to the final register value are likely to be made. If you
413 * need to use a register for temporary calculation (e.g. index type
414 * operations) use the read_* form.
415 *
416 * B1.2.1 Register mappings
417 *
418 * In instruction register encoding 31 can refer to ZR (zero register) or
419 * the SP (stack pointer) depending on context. In QEMU's case we map SP
420 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
421 * This is the point of the _sp forms.
422 */
423 TCGv_i64 cpu_reg(DisasContext *s, int reg)
424 {
425 if (reg == 31) {
426 TCGv_i64 t = tcg_temp_new_i64();
427 tcg_gen_movi_i64(t, 0);
428 return t;
429 } else {
430 return cpu_X[reg];
431 }
432 }
433
434 /* register access for when 31 == SP */
435 TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
436 {
437 return cpu_X[reg];
438 }
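/*
 * For example, a data-processing write to register 31 goes through
 * cpu_reg() and so lands in a discarded temporary (the XZR behaviour),
 * whereas an ADD/SUB (immediate) that names SP must use cpu_reg_sp()
 * so that cpu_X[31], the real stack pointer, is updated.
 */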
439
440 /* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
441 * representing the register contents. This TCGv is an auto-freed
442 * temporary so it need not be explicitly freed, and may be modified.
443 */
444 TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
445 {
446 TCGv_i64 v = tcg_temp_new_i64();
447 if (reg != 31) {
448 if (sf) {
449 tcg_gen_mov_i64(v, cpu_X[reg]);
450 } else {
451 tcg_gen_ext32u_i64(v, cpu_X[reg]);
452 }
453 } else {
454 tcg_gen_movi_i64(v, 0);
455 }
456 return v;
457 }
458
459 TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
460 {
461 TCGv_i64 v = tcg_temp_new_i64();
462 if (sf) {
463 tcg_gen_mov_i64(v, cpu_X[reg]);
464 } else {
465 tcg_gen_ext32u_i64(v, cpu_X[reg]);
466 }
467 return v;
468 }
469
470 /* Return the offset into CPUARMState of a slice (from
471 * the least significant end) of FP register Qn (ie
472 * Dn, Sn, Hn or Bn).
473 * (Note that this is not the same mapping as for A32; see cpu.h)
474 */
475 static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
476 {
477 return vec_reg_offset(s, regno, 0, size);
478 }
479
480 /* Offset of the high half of the 128 bit vector Qn */
481 static inline int fp_reg_hi_offset(DisasContext *s, int regno)
482 {
483 return vec_reg_offset(s, regno, 1, MO_64);
484 }
485
486 /* Convenience accessors for reading and writing single and double
487 * FP registers. Writing clears the upper parts of the associated
488 * 128 bit vector register, as required by the architecture.
489 * Note that, as with the GP register accessors, the values returned
490 * by the read functions are fresh temporaries and need not be freed.
491 */
492 static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
493 {
494 TCGv_i64 v = tcg_temp_new_i64();
495
496 tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
497 return v;
498 }
499
500 static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
501 {
502 TCGv_i32 v = tcg_temp_new_i32();
503
504 tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
505 return v;
506 }
507
508 static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
509 {
510 TCGv_i32 v = tcg_temp_new_i32();
511
512 tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
513 return v;
514 }
515
516 /* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
517 * If SVE is not enabled, then there are only 128 bits in the vector.
518 */
519 static void clear_vec_high(DisasContext *s, bool is_q, int rd)
520 {
521 unsigned ofs = fp_reg_offset(s, rd, MO_64);
522 unsigned vsz = vec_full_reg_size(s);
523
524 /* Nop move, with side effect of clearing the tail. */
525 tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
526 }
527
528 void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
529 {
530 unsigned ofs = fp_reg_offset(s, reg, MO_64);
531
532 tcg_gen_st_i64(v, cpu_env, ofs);
533 clear_vec_high(s, false, reg);
534 }
535
536 static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
537 {
538 TCGv_i64 tmp = tcg_temp_new_i64();
539
540 tcg_gen_extu_i32_i64(tmp, v);
541 write_fp_dreg(s, reg, tmp);
542 }
543
544 /* Expand a 2-operand AdvSIMD vector operation using an expander function. */
545 static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
546 GVecGen2Fn *gvec_fn, int vece)
547 {
548 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
549 is_q ? 16 : 8, vec_full_reg_size(s));
550 }
551
552 /* Expand a 2-operand + immediate AdvSIMD vector operation using
553 * an expander function.
554 */
555 static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
556 int64_t imm, GVecGen2iFn *gvec_fn, int vece)
557 {
558 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
559 imm, is_q ? 16 : 8, vec_full_reg_size(s));
560 }
561
562 /* Expand a 3-operand AdvSIMD vector operation using an expander function. */
563 static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
564 GVecGen3Fn *gvec_fn, int vece)
565 {
566 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
567 vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
568 }
569
570 /* Expand a 4-operand AdvSIMD vector operation using an expander function. */
571 static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
572 int rx, GVecGen4Fn *gvec_fn, int vece)
573 {
574 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
575 vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
576 is_q ? 16 : 8, vec_full_reg_size(s));
577 }
578
579 /* Expand a 2-operand operation using an out-of-line helper. */
580 static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
581 int rn, int data, gen_helper_gvec_2 *fn)
582 {
583 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
584 vec_full_reg_offset(s, rn),
585 is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
586 }
587
588 /* Expand a 3-operand operation using an out-of-line helper. */
589 static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
590 int rn, int rm, int data, gen_helper_gvec_3 *fn)
591 {
592 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
593 vec_full_reg_offset(s, rn),
594 vec_full_reg_offset(s, rm),
595 is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
596 }
597
598 /* Expand a 3-operand + fpstatus pointer + simd data value operation using
599 * an out-of-line helper.
600 */
601 static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
602 int rm, bool is_fp16, int data,
603 gen_helper_gvec_3_ptr *fn)
604 {
605 TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
606 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
607 vec_full_reg_offset(s, rn),
608 vec_full_reg_offset(s, rm), fpst,
609 is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
610 }
611
612 /* Expand a 3-operand + qc operation using an out-of-line helper. */
613 static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
614 int rm, gen_helper_gvec_3_ptr *fn)
615 {
616 TCGv_ptr qc_ptr = tcg_temp_new_ptr();
617
618 tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
619 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
620 vec_full_reg_offset(s, rn),
621 vec_full_reg_offset(s, rm), qc_ptr,
622 is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
623 }
624
625 /* Expand a 4-operand operation using an out-of-line helper. */
626 static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
627 int rm, int ra, int data, gen_helper_gvec_4 *fn)
628 {
629 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
630 vec_full_reg_offset(s, rn),
631 vec_full_reg_offset(s, rm),
632 vec_full_reg_offset(s, ra),
633 is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
634 }
635
636 /*
637 * Expand a 4-operand + fpstatus pointer + simd data value operation using
638 * an out-of-line helper.
639 */
640 static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
641 int rm, int ra, bool is_fp16, int data,
642 gen_helper_gvec_4_ptr *fn)
643 {
644 TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
645 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
646 vec_full_reg_offset(s, rn),
647 vec_full_reg_offset(s, rm),
648 vec_full_reg_offset(s, ra), fpst,
649 is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
650 }
651
652 /* Set ZF and NF based on a 64 bit result. This is alas fiddlier than the
653 * 32 bit equivalent: QEMU keeps Z as "ZF == 0" and N in bit 31 of NF, so
654 * we OR the two 32-bit halves into ZF and take the high half for NF. */
655 static inline void gen_set_NZ64(TCGv_i64 result)
656 {
657 tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
658 tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
659 }
660
661 /* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
662 static inline void gen_logic_CC(int sf, TCGv_i64 result)
663 {
664 if (sf) {
665 gen_set_NZ64(result);
666 } else {
667 tcg_gen_extrl_i64_i32(cpu_ZF, result);
668 tcg_gen_mov_i32(cpu_NF, cpu_ZF);
669 }
670 tcg_gen_movi_i32(cpu_CF, 0);
671 tcg_gen_movi_i32(cpu_VF, 0);
672 }
673
674 /* dest = T0 + T1; compute C, N, V and Z flags */
675 static void gen_add64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
676 {
677 TCGv_i64 result, flag, tmp;
678 result = tcg_temp_new_i64();
679 flag = tcg_temp_new_i64();
680 tmp = tcg_temp_new_i64();
681
682 tcg_gen_movi_i64(tmp, 0);
683 tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
684
685 tcg_gen_extrl_i64_i32(cpu_CF, flag);
686
687 gen_set_NZ64(result);
688
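    /*
     * V: signed overflow occurs iff t0 and t1 have the same sign but the
     * result does not, i.e. bit 63 of (result ^ t0) & ~(t0 ^ t1).
     */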
689 tcg_gen_xor_i64(flag, result, t0);
690 tcg_gen_xor_i64(tmp, t0, t1);
691 tcg_gen_andc_i64(flag, flag, tmp);
692 tcg_gen_extrh_i64_i32(cpu_VF, flag);
693
694 tcg_gen_mov_i64(dest, result);
695 }
696
697 static void gen_add32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
698 {
699 TCGv_i32 t0_32 = tcg_temp_new_i32();
700 TCGv_i32 t1_32 = tcg_temp_new_i32();
701 TCGv_i32 tmp = tcg_temp_new_i32();
702
703 tcg_gen_movi_i32(tmp, 0);
704 tcg_gen_extrl_i64_i32(t0_32, t0);
705 tcg_gen_extrl_i64_i32(t1_32, t1);
706 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
707 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
708 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
709 tcg_gen_xor_i32(tmp, t0_32, t1_32);
710 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
711 tcg_gen_extu_i32_i64(dest, cpu_NF);
712 }
713
714 static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
715 {
716 if (sf) {
717 gen_add64_CC(dest, t0, t1);
718 } else {
719 gen_add32_CC(dest, t0, t1);
720 }
721 }
722
723 /* dest = T0 - T1; compute C, N, V and Z flags */
724 static void gen_sub64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
725 {
726 /* 64 bit arithmetic */
727 TCGv_i64 result, flag, tmp;
728
729 result = tcg_temp_new_i64();
730 flag = tcg_temp_new_i64();
731 tcg_gen_sub_i64(result, t0, t1);
732
733 gen_set_NZ64(result);
734
735 tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
736 tcg_gen_extrl_i64_i32(cpu_CF, flag);
737
738 tcg_gen_xor_i64(flag, result, t0);
739 tmp = tcg_temp_new_i64();
740 tcg_gen_xor_i64(tmp, t0, t1);
741 tcg_gen_and_i64(flag, flag, tmp);
742 tcg_gen_extrh_i64_i32(cpu_VF, flag);
743 tcg_gen_mov_i64(dest, result);
744 }
745
746 static void gen_sub32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
747 {
748 /* 32 bit arithmetic */
749 TCGv_i32 t0_32 = tcg_temp_new_i32();
750 TCGv_i32 t1_32 = tcg_temp_new_i32();
751 TCGv_i32 tmp;
752
753 tcg_gen_extrl_i64_i32(t0_32, t0);
754 tcg_gen_extrl_i64_i32(t1_32, t1);
755 tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
756 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
757 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
758 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
759 tmp = tcg_temp_new_i32();
760 tcg_gen_xor_i32(tmp, t0_32, t1_32);
761 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
762 tcg_gen_extu_i32_i64(dest, cpu_NF);
763 }
764
765 static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
766 {
767 if (sf) {
768 gen_sub64_CC(dest, t0, t1);
769 } else {
770 gen_sub32_CC(dest, t0, t1);
771 }
772 }
773
774 /* dest = T0 + T1 + CF; do not compute flags. */
775 static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
776 {
777 TCGv_i64 flag = tcg_temp_new_i64();
778 tcg_gen_extu_i32_i64(flag, cpu_CF);
779 tcg_gen_add_i64(dest, t0, t1);
780 tcg_gen_add_i64(dest, dest, flag);
781
782 if (!sf) {
783 tcg_gen_ext32u_i64(dest, dest);
784 }
785 }
786
787 /* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
788 static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
789 {
790 if (sf) {
791 TCGv_i64 result = tcg_temp_new_i64();
792 TCGv_i64 cf_64 = tcg_temp_new_i64();
793 TCGv_i64 vf_64 = tcg_temp_new_i64();
794 TCGv_i64 tmp = tcg_temp_new_i64();
795 TCGv_i64 zero = tcg_constant_i64(0);
796
797 tcg_gen_extu_i32_i64(cf_64, cpu_CF);
798 tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
799 tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
800 tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
801 gen_set_NZ64(result);
802
803 tcg_gen_xor_i64(vf_64, result, t0);
804 tcg_gen_xor_i64(tmp, t0, t1);
805 tcg_gen_andc_i64(vf_64, vf_64, tmp);
806 tcg_gen_extrh_i64_i32(cpu_VF, vf_64);
807
808 tcg_gen_mov_i64(dest, result);
809 } else {
810 TCGv_i32 t0_32 = tcg_temp_new_i32();
811 TCGv_i32 t1_32 = tcg_temp_new_i32();
812 TCGv_i32 tmp = tcg_temp_new_i32();
813 TCGv_i32 zero = tcg_constant_i32(0);
814
815 tcg_gen_extrl_i64_i32(t0_32, t0);
816 tcg_gen_extrl_i64_i32(t1_32, t1);
817 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
818 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);
819
820 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
821 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
822 tcg_gen_xor_i32(tmp, t0_32, t1_32);
823 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
824 tcg_gen_extu_i32_i64(dest, cpu_NF);
825 }
826 }
827
828 /*
829 * Load/Store generators
830 */
831
832 /*
833 * Store from GPR register to memory.
834 */
835 static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
836 TCGv_i64 tcg_addr, MemOp memop, int memidx,
837 bool iss_valid,
838 unsigned int iss_srt,
839 bool iss_sf, bool iss_ar)
840 {
841 memop = finalize_memop(s, memop);
842 tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);
843
844 if (iss_valid) {
845 uint32_t syn;
846
847 syn = syn_data_abort_with_iss(0,
848 (memop & MO_SIZE),
849 false,
850 iss_srt,
851 iss_sf,
852 iss_ar,
853 0, 0, 0, 0, 0, false);
854 disas_set_insn_syndrome(s, syn);
855 }
856 }
857
858 static void do_gpr_st(DisasContext *s, TCGv_i64 source,
859 TCGv_i64 tcg_addr, MemOp memop,
860 bool iss_valid,
861 unsigned int iss_srt,
862 bool iss_sf, bool iss_ar)
863 {
864 do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
865 iss_valid, iss_srt, iss_sf, iss_ar);
866 }
867
868 /*
869 * Load from memory to GPR register
870 */
871 static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
872 MemOp memop, bool extend, int memidx,
873 bool iss_valid, unsigned int iss_srt,
874 bool iss_sf, bool iss_ar)
875 {
876 memop = finalize_memop(s, memop);
877 tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);
878
879 if (extend && (memop & MO_SIGN)) {
880 g_assert((memop & MO_SIZE) <= MO_32);
881 tcg_gen_ext32u_i64(dest, dest);
882 }
883
884 if (iss_valid) {
885 uint32_t syn;
886
887 syn = syn_data_abort_with_iss(0,
888 (memop & MO_SIZE),
889 (memop & MO_SIGN) != 0,
890 iss_srt,
891 iss_sf,
892 iss_ar,
893 0, 0, 0, 0, 0, false);
894 disas_set_insn_syndrome(s, syn);
895 }
896 }
897
898 static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
899 MemOp memop, bool extend,
900 bool iss_valid, unsigned int iss_srt,
901 bool iss_sf, bool iss_ar)
902 {
903 do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
904 iss_valid, iss_srt, iss_sf, iss_ar);
905 }
906
907 /*
908 * Store from FP register to memory
909 */
910 static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
911 {
912 /* This writes the bottom N bits of a 128 bit wide vector to memory */
913 TCGv_i64 tmplo = tcg_temp_new_i64();
914 MemOp mop = finalize_memop_asimd(s, size);
915
916 tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));
917
918 if (size < MO_128) {
919 tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
920 } else {
921 TCGv_i64 tmphi = tcg_temp_new_i64();
922 TCGv_i128 t16 = tcg_temp_new_i128();
923
924 tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
925 tcg_gen_concat_i64_i128(t16, tmplo, tmphi);
926
927 tcg_gen_qemu_st_i128(t16, tcg_addr, get_mem_index(s), mop);
928 }
929 }
930
931 /*
932 * Load from memory to FP register
933 */
934 static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
935 {
936 /* This always zero-extends and writes to a full 128 bit wide vector */
937 TCGv_i64 tmplo = tcg_temp_new_i64();
938 TCGv_i64 tmphi = NULL;
939 MemOp mop = finalize_memop_asimd(s, size);
940
941 if (size < MO_128) {
942 tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
943 } else {
944 TCGv_i128 t16 = tcg_temp_new_i128();
945
946 tcg_gen_qemu_ld_i128(t16, tcg_addr, get_mem_index(s), mop);
947
948 tmphi = tcg_temp_new_i64();
949 tcg_gen_extr_i128_i64(tmplo, tmphi, t16);
950 }
951
952 tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
953
954 if (tmphi) {
955 tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
956 }
957 clear_vec_high(s, tmphi != NULL, destidx);
958 }
959
960 /*
961 * Vector load/store helpers.
962 *
963 * The principal difference between this and a FP load is that we don't
964 * zero extend as we are filling a partial chunk of the vector register.
965 * These functions don't support 128 bit loads/stores, which would be
966 * normal load/store operations.
967 *
968 * The _i32 versions are useful when operating on 32 bit quantities
969 * (eg for floating point single or using Neon helper functions).
970 */
971
972 /* Get value of an element within a vector register */
973 static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
974 int element, MemOp memop)
975 {
976 int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
977 switch ((unsigned)memop) {
978 case MO_8:
979 tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
980 break;
981 case MO_16:
982 tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
983 break;
984 case MO_32:
985 tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
986 break;
987 case MO_8|MO_SIGN:
988 tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
989 break;
990 case MO_16|MO_SIGN:
991 tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
992 break;
993 case MO_32|MO_SIGN:
994 tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
995 break;
996 case MO_64:
997 case MO_64|MO_SIGN:
998 tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
999 break;
1000 default:
1001 g_assert_not_reached();
1002 }
1003 }
1004
1005 static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
1006 int element, MemOp memop)
1007 {
1008 int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
1009 switch (memop) {
1010 case MO_8:
1011 tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
1012 break;
1013 case MO_16:
1014 tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
1015 break;
1016 case MO_8|MO_SIGN:
1017 tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
1018 break;
1019 case MO_16|MO_SIGN:
1020 tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
1021 break;
1022 case MO_32:
1023 case MO_32|MO_SIGN:
1024 tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
1025 break;
1026 default:
1027 g_assert_not_reached();
1028 }
1029 }
1030
1031 /* Set value of an element within a vector register */
1032 static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
1033 int element, MemOp memop)
1034 {
1035 int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1036 switch (memop) {
1037 case MO_8:
1038 tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
1039 break;
1040 case MO_16:
1041 tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
1042 break;
1043 case MO_32:
1044 tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
1045 break;
1046 case MO_64:
1047 tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
1048 break;
1049 default:
1050 g_assert_not_reached();
1051 }
1052 }
1053
1054 static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
1055 int destidx, int element, MemOp memop)
1056 {
1057 int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1058 switch (memop) {
1059 case MO_8:
1060 tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
1061 break;
1062 case MO_16:
1063 tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
1064 break;
1065 case MO_32:
1066 tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
1067 break;
1068 default:
1069 g_assert_not_reached();
1070 }
1071 }
1072
1073 /* Store from vector register to memory */
1074 static void do_vec_st(DisasContext *s, int srcidx, int element,
1075 TCGv_i64 tcg_addr, MemOp mop)
1076 {
1077 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1078
1079 read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
1080 tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
1081 }
1082
1083 /* Load from memory to vector register */
1084 static void do_vec_ld(DisasContext *s, int destidx, int element,
1085 TCGv_i64 tcg_addr, MemOp mop)
1086 {
1087 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1088
1089 tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
1090 write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
1091 }
1092
1093 /* Check that FP/Neon access is enabled. If it is, return
1094 * true. If not, emit code to generate an appropriate exception,
1095 * and return false; the caller should not emit any code for
1096 * the instruction. Note that this check must happen after all
1097 * unallocated-encoding checks (otherwise the syndrome information
1098 * for the resulting exception will be incorrect).
1099 */
1100 static bool fp_access_check_only(DisasContext *s)
1101 {
1102 if (s->fp_excp_el) {
1103 assert(!s->fp_access_checked);
1104 s->fp_access_checked = true;
1105
1106 gen_exception_insn_el(s, 0, EXCP_UDEF,
1107 syn_fp_access_trap(1, 0xe, false, 0),
1108 s->fp_excp_el);
1109 return false;
1110 }
1111 s->fp_access_checked = true;
1112 return true;
1113 }
1114
1115 static bool fp_access_check(DisasContext *s)
1116 {
1117 if (!fp_access_check_only(s)) {
1118 return false;
1119 }
1120 if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
1121 gen_exception_insn(s, 0, EXCP_UDEF,
1122 syn_smetrap(SME_ET_Streaming, false));
1123 return false;
1124 }
1125 return true;
1126 }
1127
1128 /*
1129 * Check that SVE access is enabled. If it is, return true.
1130 * If not, emit code to generate an appropriate exception and return false.
1131 * This function corresponds to CheckSVEEnabled().
1132 */
1133 bool sve_access_check(DisasContext *s)
1134 {
1135 if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
1136 assert(dc_isar_feature(aa64_sme, s));
1137 if (!sme_sm_enabled_check(s)) {
1138 goto fail_exit;
1139 }
1140 } else if (s->sve_excp_el) {
1141 gen_exception_insn_el(s, 0, EXCP_UDEF,
1142 syn_sve_access_trap(), s->sve_excp_el);
1143 goto fail_exit;
1144 }
1145 s->sve_access_checked = true;
1146 return fp_access_check(s);
1147
1148 fail_exit:
1149 /* Assert that we only raise one exception per instruction. */
1150 assert(!s->sve_access_checked);
1151 s->sve_access_checked = true;
1152 return false;
1153 }
1154
1155 /*
1156 * Check that SME access is enabled, raise an exception if not.
1157 * Note that this function corresponds to CheckSMEAccess and is
1158 * only used directly for cpregs.
1159 */
1160 static bool sme_access_check(DisasContext *s)
1161 {
1162 if (s->sme_excp_el) {
1163 gen_exception_insn_el(s, 0, EXCP_UDEF,
1164 syn_smetrap(SME_ET_AccessTrap, false),
1165 s->sme_excp_el);
1166 return false;
1167 }
1168 return true;
1169 }
1170
1171 /* This function corresponds to CheckSMEEnabled. */
1172 bool sme_enabled_check(DisasContext *s)
1173 {
1174 /*
1175 * Note that unlike sve_excp_el, we have not constrained sme_excp_el
1176 * to be zero when fp_excp_el has priority. This is because we need
1177 * sme_excp_el by itself for cpregs access checks.
1178 */
1179 if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
1180 s->fp_access_checked = true;
1181 return sme_access_check(s);
1182 }
1183 return fp_access_check_only(s);
1184 }
1185
1186 /* Common subroutine for CheckSMEAnd*Enabled. */
1187 bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
1188 {
1189 if (!sme_enabled_check(s)) {
1190 return false;
1191 }
1192 if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
1193 gen_exception_insn(s, 0, EXCP_UDEF,
1194 syn_smetrap(SME_ET_NotStreaming, false));
1195 return false;
1196 }
1197 if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
1198 gen_exception_insn(s, 0, EXCP_UDEF,
1199 syn_smetrap(SME_ET_InactiveZA, false));
1200 return false;
1201 }
1202 return true;
1203 }
1204
1205 /*
1206 * This utility function is for doing register extension with an
1207 * optional shift. You will likely want to pass a temporary for the
1208 * destination register. See DecodeRegExtend() in the ARM ARM.
1209 */
1210 static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
1211 int option, unsigned int shift)
1212 {
1213 int extsize = extract32(option, 0, 2);
1214 bool is_signed = extract32(option, 2, 1);
1215
1216 if (is_signed) {
1217 switch (extsize) {
1218 case 0:
1219 tcg_gen_ext8s_i64(tcg_out, tcg_in);
1220 break;
1221 case 1:
1222 tcg_gen_ext16s_i64(tcg_out, tcg_in);
1223 break;
1224 case 2:
1225 tcg_gen_ext32s_i64(tcg_out, tcg_in);
1226 break;
1227 case 3:
1228 tcg_gen_mov_i64(tcg_out, tcg_in);
1229 break;
1230 }
1231 } else {
1232 switch (extsize) {
1233 case 0:
1234 tcg_gen_ext8u_i64(tcg_out, tcg_in);
1235 break;
1236 case 1:
1237 tcg_gen_ext16u_i64(tcg_out, tcg_in);
1238 break;
1239 case 2:
1240 tcg_gen_ext32u_i64(tcg_out, tcg_in);
1241 break;
1242 case 3:
1243 tcg_gen_mov_i64(tcg_out, tcg_in);
1244 break;
1245 }
1246 }
1247
1248 if (shift) {
1249 tcg_gen_shli_i64(tcg_out, tcg_out, shift);
1250 }
1251 }
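/*
 * For example, option == 2 with shift == 2 is "UXTW #2": the low 32 bits
 * of tcg_in are zero-extended and then shifted left by two, as used for a
 * scaled 32-bit register offset in loads and stores.
 */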
1252
1253 static inline void gen_check_sp_alignment(DisasContext *s)
1254 {
1255 /* The AArch64 architecture mandates that (if enabled via PSTATE
1256 * or SCTLR bits) there is a check that SP is 16-aligned on every
1257 * SP-relative load or store (with an exception generated if it is not).
1258 * In line with general QEMU practice regarding misaligned accesses,
1259 * we omit these checks for the sake of guest program performance.
1260 * This function is provided as a hook so we can more easily add these
1261 * checks in future (possibly as a "favour catching guest program bugs
1262 * over speed" user selectable option).
1263 */
1264 }
1265
1266 /*
1267 * This provides a simple table-based lookup decoder. It is
1268 * intended to be used when the relevant bits for decode are too
1269 * awkwardly placed and switch/if based logic would be confusing and
1270 * deeply nested. Since it's a linear search through the table, tables
1271 * should be kept small.
1272 *
1273 * It returns the first handler where insn & mask == pattern, or
1274 * NULL if there is no match.
1275 * The table is terminated by an empty mask (i.e. 0)
1276 */
1277 static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
1278 uint32_t insn)
1279 {
1280 const AArch64DecodeTable *tptr = table;
1281
1282 while (tptr->mask) {
1283 if ((insn & tptr->mask) == tptr->pattern) {
1284 return tptr->disas_fn;
1285 }
1286 tptr++;
1287 }
1288 return NULL;
1289 }
1290
1291 /*
1292 * The instruction disassembly implemented here matches
1293 * the instruction encoding classifications in chapter C4
1294 * of the ARM Architecture Reference Manual (DDI0487B_a);
1295 * classification names and decode diagrams here should generally
1296 * match up with those in the manual.
1297 */
1298
1299 static bool trans_B(DisasContext *s, arg_i *a)
1300 {
1301 reset_btype(s);
1302 gen_goto_tb(s, 0, a->imm);
1303 return true;
1304 }
1305
1306 static bool trans_BL(DisasContext *s, arg_i *a)
1307 {
1308 gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
1309 reset_btype(s);
1310 gen_goto_tb(s, 0, a->imm);
1311 return true;
1312 }
1313
1314
1315 static bool trans_CBZ(DisasContext *s, arg_cbz *a)
1316 {
1317 DisasLabel match;
1318 TCGv_i64 tcg_cmp;
1319
1320 tcg_cmp = read_cpu_reg(s, a->rt, a->sf);
1321 reset_btype(s);
1322
1323 match = gen_disas_label(s);
1324 tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
1325 tcg_cmp, 0, match.label);
1326 gen_goto_tb(s, 0, 4);
1327 set_disas_label(s, match);
1328 gen_goto_tb(s, 1, a->imm);
1329 return true;
1330 }
1331
1332 static bool trans_TBZ(DisasContext *s, arg_tbz *a)
1333 {
1334 DisasLabel match;
1335 TCGv_i64 tcg_cmp;
1336
1337 tcg_cmp = tcg_temp_new_i64();
1338 tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, a->rt), 1ULL << a->bitpos);
1339
1340 reset_btype(s);
1341
1342 match = gen_disas_label(s);
1343 tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
1344 tcg_cmp, 0, match.label);
1345 gen_goto_tb(s, 0, 4);
1346 set_disas_label(s, match);
1347 gen_goto_tb(s, 1, a->imm);
1348 return true;
1349 }
1350
1351 static bool trans_B_cond(DisasContext *s, arg_B_cond *a)
1352 {
1353 reset_btype(s);
1354 if (a->cond < 0x0e) {
1355 /* genuinely conditional branches */
1356 DisasLabel match = gen_disas_label(s);
1357 arm_gen_test_cc(a->cond, match.label);
1358 gen_goto_tb(s, 0, 4);
1359 set_disas_label(s, match);
1360 gen_goto_tb(s, 1, a->imm);
1361 } else {
1362 /* 0xe and 0xf are both "always" conditions */
1363 gen_goto_tb(s, 0, a->imm);
1364 }
1365 return true;
1366 }
1367
1368 static void set_btype_for_br(DisasContext *s, int rn)
1369 {
1370 if (dc_isar_feature(aa64_bti, s)) {
1371 /* BR to {x16,x17} or !guard -> 1, else 3. */
1372 set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
1373 }
1374 }
1375
1376 static void set_btype_for_blr(DisasContext *s)
1377 {
1378 if (dc_isar_feature(aa64_bti, s)) {
1379 /* BLR sets BTYPE to 2, regardless of source guarded page. */
1380 set_btype(s, 2);
1381 }
1382 }
1383
1384 static bool trans_BR(DisasContext *s, arg_r *a)
1385 {
1386 gen_a64_set_pc(s, cpu_reg(s, a->rn));
1387 set_btype_for_br(s, a->rn);
1388 s->base.is_jmp = DISAS_JUMP;
1389 return true;
1390 }
1391
1392 static bool trans_BLR(DisasContext *s, arg_r *a)
1393 {
1394 TCGv_i64 dst = cpu_reg(s, a->rn);
1395 TCGv_i64 lr = cpu_reg(s, 30);
1396 if (dst == lr) {
1397 TCGv_i64 tmp = tcg_temp_new_i64();
1398 tcg_gen_mov_i64(tmp, dst);
1399 dst = tmp;
1400 }
1401 gen_pc_plus_diff(s, lr, curr_insn_len(s));
1402 gen_a64_set_pc(s, dst);
1403 set_btype_for_blr(s);
1404 s->base.is_jmp = DISAS_JUMP;
1405 return true;
1406 }
1407
1408 static bool trans_RET(DisasContext *s, arg_r *a)
1409 {
1410 gen_a64_set_pc(s, cpu_reg(s, a->rn));
1411 s->base.is_jmp = DISAS_JUMP;
1412 return true;
1413 }
1414
1415 static TCGv_i64 auth_branch_target(DisasContext *s, TCGv_i64 dst,
1416 TCGv_i64 modifier, bool use_key_a)
1417 {
1418 TCGv_i64 truedst;
1419 /*
1420 * Return the branch target for a BRAA/RETA/etc, which is either
1421 * just the destination dst, or that value with the pauth check
1422 * done and the code removed from the high bits.
1423 */
1424 if (!s->pauth_active) {
1425 return dst;
1426 }
1427
1428 truedst = tcg_temp_new_i64();
1429 if (use_key_a) {
1430 gen_helper_autia(truedst, cpu_env, dst, modifier);
1431 } else {
1432 gen_helper_autib(truedst, cpu_env, dst, modifier);
1433 }
1434 return truedst;
1435 }
1436
1437 static bool trans_BRAZ(DisasContext *s, arg_braz *a)
1438 {
1439 TCGv_i64 dst;
1440
1441 if (!dc_isar_feature(aa64_pauth, s)) {
1442 return false;
1443 }
1444
1445 dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
1446 gen_a64_set_pc(s, dst);
1447 set_btype_for_br(s, a->rn);
1448 s->base.is_jmp = DISAS_JUMP;
1449 return true;
1450 }
1451
1452 static bool trans_BLRAZ(DisasContext *s, arg_braz *a)
1453 {
1454 TCGv_i64 dst, lr;
1455
1456 if (!dc_isar_feature(aa64_pauth, s)) {
1457 return false;
1458 }
1459
1460 dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
1461 lr = cpu_reg(s, 30);
1462 if (dst == lr) {
1463 TCGv_i64 tmp = tcg_temp_new_i64();
1464 tcg_gen_mov_i64(tmp, dst);
1465 dst = tmp;
1466 }
1467 gen_pc_plus_diff(s, lr, curr_insn_len(s));
1468 gen_a64_set_pc(s, dst);
1469 set_btype_for_blr(s);
1470 s->base.is_jmp = DISAS_JUMP;
1471 return true;
1472 }
1473
1474 static bool trans_RETA(DisasContext *s, arg_reta *a)
1475 {
1476 TCGv_i64 dst;
1477
1478 dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
1479 gen_a64_set_pc(s, dst);
1480 s->base.is_jmp = DISAS_JUMP;
1481 return true;
1482 }
1483
1484 static bool trans_BRA(DisasContext *s, arg_bra *a)
1485 {
1486 TCGv_i64 dst;
1487
1488 if (!dc_isar_feature(aa64_pauth, s)) {
1489 return false;
1490 }
1491 dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
1492 gen_a64_set_pc(s, dst);
1493 set_btype_for_br(s, a->rn);
1494 s->base.is_jmp = DISAS_JUMP;
1495 return true;
1496 }
1497
1498 static bool trans_BLRA(DisasContext *s, arg_bra *a)
1499 {
1500 TCGv_i64 dst, lr;
1501
1502 if (!dc_isar_feature(aa64_pauth, s)) {
1503 return false;
1504 }
1505 dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
1506 lr = cpu_reg(s, 30);
1507 if (dst == lr) {
1508 TCGv_i64 tmp = tcg_temp_new_i64();
1509 tcg_gen_mov_i64(tmp, dst);
1510 dst = tmp;
1511 }
1512 gen_pc_plus_diff(s, lr, curr_insn_len(s));
1513 gen_a64_set_pc(s, dst);
1514 set_btype_for_blr(s);
1515 s->base.is_jmp = DISAS_JUMP;
1516 return true;
1517 }
1518
1519 static bool trans_ERET(DisasContext *s, arg_ERET *a)
1520 {
1521 TCGv_i64 dst;
1522
1523 if (s->current_el == 0) {
1524 return false;
1525 }
1526 if (s->fgt_eret) {
1527 gen_exception_insn_el(s, 0, EXCP_UDEF, 0, 2);
1528 return true;
1529 }
1530 dst = tcg_temp_new_i64();
1531 tcg_gen_ld_i64(dst, cpu_env,
1532 offsetof(CPUARMState, elr_el[s->current_el]));
1533
1534 translator_io_start(&s->base);
1535
1536 gen_helper_exception_return(cpu_env, dst);
1537 /* Must exit loop to check un-masked IRQs */
1538 s->base.is_jmp = DISAS_EXIT;
1539 return true;
1540 }
1541
1542 static bool trans_ERETA(DisasContext *s, arg_reta *a)
1543 {
1544 TCGv_i64 dst;
1545
1546 if (!dc_isar_feature(aa64_pauth, s)) {
1547 return false;
1548 }
1549 if (s->current_el == 0) {
1550 return false;
1551 }
1552 /* The FGT trap takes precedence over an auth trap. */
1553 if (s->fgt_eret) {
1554 gen_exception_insn_el(s, 0, EXCP_UDEF, a->m ? 3 : 2, 2);
1555 return true;
1556 }
1557 dst = tcg_temp_new_i64();
1558 tcg_gen_ld_i64(dst, cpu_env,
1559 offsetof(CPUARMState, elr_el[s->current_el]));
1560
1561 dst = auth_branch_target(s, dst, cpu_X[31], !a->m);
1562
1563 translator_io_start(&s->base);
1564
1565 gen_helper_exception_return(cpu_env, dst);
1566 /* Must exit loop to check un-masked IRQs */
1567 s->base.is_jmp = DISAS_EXIT;
1568 return true;
1569 }
1570
1571 /* HINT instruction group, including various allocated HINTs */
1572 static void handle_hint(DisasContext *s, uint32_t insn,
1573 unsigned int op1, unsigned int op2, unsigned int crm)
1574 {
1575 unsigned int selector = crm << 3 | op2;
1576
1577 if (op1 != 3) {
1578 unallocated_encoding(s);
1579 return;
1580 }
1581
1582 switch (selector) {
1583 case 0b00000: /* NOP */
1584 break;
1585 case 0b00011: /* WFI */
1586 s->base.is_jmp = DISAS_WFI;
1587 break;
1588 case 0b00001: /* YIELD */
1589 /* When running in MTTCG we don't generate jumps to the yield and
1590 * WFE helpers, as doing so won't affect the scheduling of other vCPUs.
1591 * If we wanted to more completely model WFE/SEV so we don't busy
1592 * spin unnecessarily we would need to do something more involved.
1593 */
1594 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1595 s->base.is_jmp = DISAS_YIELD;
1596 }
1597 break;
1598 case 0b00010: /* WFE */
1599 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1600 s->base.is_jmp = DISAS_WFE;
1601 }
1602 break;
1603 case 0b00100: /* SEV */
1604 case 0b00101: /* SEVL */
1605 case 0b00110: /* DGH */
1606 /* we treat all as NOP at least for now */
1607 break;
1608 case 0b00111: /* XPACLRI */
1609 if (s->pauth_active) {
1610 gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
1611 }
1612 break;
1613 case 0b01000: /* PACIA1716 */
1614 if (s->pauth_active) {
1615 gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1616 }
1617 break;
1618 case 0b01010: /* PACIB1716 */
1619 if (s->pauth_active) {
1620 gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1621 }
1622 break;
1623 case 0b01100: /* AUTIA1716 */
1624 if (s->pauth_active) {
1625 gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1626 }
1627 break;
1628 case 0b01110: /* AUTIB1716 */
1629 if (s->pauth_active) {
1630 gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1631 }
1632 break;
1633 case 0b10000: /* ESB */
1634 /* Without RAS, we must implement this as a NOP. */
1635 if (dc_isar_feature(aa64_ras, s)) {
1636 /*
1637 * QEMU does not have a source of physical SErrors,
1638 * so we are only concerned with virtual SErrors.
1639 * The pseudocode in the ARM ARM for this case is
1640 * if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
1641 * AArch64.vESBOperation();
1642 * Most of the condition can be evaluated at translation time.
1643 * Test for EL2 present, and defer test for SEL2 to runtime.
1644 */
1645 if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
1646 gen_helper_vesb(cpu_env);
1647 }
1648 }
1649 break;
1650 case 0b11000: /* PACIAZ */
1651 if (s->pauth_active) {
1652 gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
1653 tcg_constant_i64(0));
1654 }
1655 break;
1656 case 0b11001: /* PACIASP */
1657 if (s->pauth_active) {
1658 gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1659 }
1660 break;
1661 case 0b11010: /* PACIBZ */
1662 if (s->pauth_active) {
1663 gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
1664 tcg_constant_i64(0));
1665 }
1666 break;
1667 case 0b11011: /* PACIBSP */
1668 if (s->pauth_active) {
1669 gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1670 }
1671 break;
1672 case 0b11100: /* AUTIAZ */
1673 if (s->pauth_active) {
1674 gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
1675 tcg_constant_i64(0));
1676 }
1677 break;
1678 case 0b11101: /* AUTIASP */
1679 if (s->pauth_active) {
1680 gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1681 }
1682 break;
1683 case 0b11110: /* AUTIBZ */
1684 if (s->pauth_active) {
1685 gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
1686 tcg_constant_i64(0));
1687 }
1688 break;
1689 case 0b11111: /* AUTIBSP */
1690 if (s->pauth_active) {
1691 gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1692 }
1693 break;
1694 default:
1695 /* default specified as NOP equivalent */
1696 break;
1697 }
1698 }
1699
1700 static void gen_clrex(DisasContext *s, uint32_t insn)
1701 {
1702 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
1703 }
1704
1705 /* CLREX, DSB, DMB, ISB */
1706 static void handle_sync(DisasContext *s, uint32_t insn,
1707 unsigned int op1, unsigned int op2, unsigned int crm)
1708 {
1709 TCGBar bar;
1710
1711 if (op1 != 3) {
1712 unallocated_encoding(s);
1713 return;
1714 }
1715
1716 switch (op2) {
1717 case 2: /* CLREX */
1718 gen_clrex(s, insn);
1719 return;
1720 case 4: /* DSB */
1721 case 5: /* DMB */
1722 switch (crm & 3) {
1723 case 1: /* MBReqTypes_Reads */
1724 bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
1725 break;
1726 case 2: /* MBReqTypes_Writes */
1727 bar = TCG_BAR_SC | TCG_MO_ST_ST;
1728 break;
1729 default: /* MBReqTypes_All */
1730 bar = TCG_BAR_SC | TCG_MO_ALL;
1731 break;
1732 }
1733 tcg_gen_mb(bar);
1734 return;
1735 case 6: /* ISB */
1736 /* We need to break the TB after this insn to execute
1737 * self-modifying code correctly and also to take
1738 * any pending interrupts immediately.
1739 */
1740 reset_btype(s);
1741 gen_goto_tb(s, 0, 4);
1742 return;
1743
1744 case 7: /* SB */
1745 if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
1746 goto do_unallocated;
1747 }
1748 /*
1749 * TODO: There is no speculation barrier opcode for TCG;
1750 * MB and end the TB instead.
1751 */
1752 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1753 gen_goto_tb(s, 0, 4);
1754 return;
1755
1756 default:
1757 do_unallocated:
1758 unallocated_encoding(s);
1759 return;
1760 }
1761 }
1762
1763 static void gen_xaflag(void)
1764 {
1765 TCGv_i32 z = tcg_temp_new_i32();
1766
1767 tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);
1768
1769 /*
1770 * (!C & !Z) << 31
1771 * (!(C | Z)) << 31
1772 * ~((C | Z) << 31)
1773 * ~-(C | Z)
1774 * (C | Z) - 1
1775 */
1776 tcg_gen_or_i32(cpu_NF, cpu_CF, z);
1777 tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);
1778
1779 /* !(Z & C) */
1780 tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
1781 tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);
1782
1783 /* (!C & Z) << 31 -> -(Z & ~C) */
1784 tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
1785 tcg_gen_neg_i32(cpu_VF, cpu_VF);
1786
1787 /* C | Z */
1788 tcg_gen_or_i32(cpu_CF, cpu_CF, z);
1789 }
1790
1791 static void gen_axflag(void)
1792 {
1793 tcg_gen_sari_i32(cpu_VF, cpu_VF, 31); /* V ? -1 : 0 */
1794 tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF); /* C & !V */
1795
1796 /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
1797 tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);
1798
1799 tcg_gen_movi_i32(cpu_NF, 0);
1800 tcg_gen_movi_i32(cpu_VF, 0);
1801 }
1802
1803 /* MSR (immediate) - move immediate to processor state field */
1804 static void handle_msr_i(DisasContext *s, uint32_t insn,
1805 unsigned int op1, unsigned int op2, unsigned int crm)
1806 {
1807 int op = op1 << 3 | op2;
1808
1809 /* End the TB by default; chaining is OK. */
1810 s->base.is_jmp = DISAS_TOO_MANY;
1811
1812 switch (op) {
1813 case 0x00: /* CFINV */
1814 if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
1815 goto do_unallocated;
1816 }
1817 tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
1818 s->base.is_jmp = DISAS_NEXT;
1819 break;
1820
1821 case 0x01: /* XAFlag */
1822 if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
1823 goto do_unallocated;
1824 }
1825 gen_xaflag();
1826 s->base.is_jmp = DISAS_NEXT;
1827 break;
1828
1829 case 0x02: /* AXFlag */
1830 if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
1831 goto do_unallocated;
1832 }
1833 gen_axflag();
1834 s->base.is_jmp = DISAS_NEXT;
1835 break;
1836
1837 case 0x03: /* UAO */
1838 if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
1839 goto do_unallocated;
1840 }
1841 if (crm & 1) {
1842 set_pstate_bits(PSTATE_UAO);
1843 } else {
1844 clear_pstate_bits(PSTATE_UAO);
1845 }
1846 gen_rebuild_hflags(s);
1847 break;
1848
1849 case 0x04: /* PAN */
1850 if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
1851 goto do_unallocated;
1852 }
1853 if (crm & 1) {
1854 set_pstate_bits(PSTATE_PAN);
1855 } else {
1856 clear_pstate_bits(PSTATE_PAN);
1857 }
1858 gen_rebuild_hflags(s);
1859 break;
1860
1861 case 0x05: /* SPSel */
1862 if (s->current_el == 0) {
1863 goto do_unallocated;
1864 }
1865 gen_helper_msr_i_spsel(cpu_env, tcg_constant_i32(crm & PSTATE_SP));
1866 break;
1867
1868 case 0x19: /* SSBS */
1869 if (!dc_isar_feature(aa64_ssbs, s)) {
1870 goto do_unallocated;
1871 }
1872 if (crm & 1) {
1873 set_pstate_bits(PSTATE_SSBS);
1874 } else {
1875 clear_pstate_bits(PSTATE_SSBS);
1876 }
1877 /* Don't need to rebuild hflags since SSBS is a nop */
1878 break;
1879
1880 case 0x1a: /* DIT */
1881 if (!dc_isar_feature(aa64_dit, s)) {
1882 goto do_unallocated;
1883 }
1884 if (crm & 1) {
1885 set_pstate_bits(PSTATE_DIT);
1886 } else {
1887 clear_pstate_bits(PSTATE_DIT);
1888 }
1889 /* There's no need to rebuild hflags because DIT is a nop */
1890 break;
1891
1892 case 0x1e: /* DAIFSet */
1893 gen_helper_msr_i_daifset(cpu_env, tcg_constant_i32(crm));
1894 break;
1895
1896 case 0x1f: /* DAIFClear */
1897 gen_helper_msr_i_daifclear(cpu_env, tcg_constant_i32(crm));
1898 /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */
1899 s->base.is_jmp = DISAS_UPDATE_EXIT;
1900 break;
1901
1902 case 0x1c: /* TCO */
1903 if (dc_isar_feature(aa64_mte, s)) {
1904 /* Full MTE is enabled -- set the TCO bit as directed. */
1905 if (crm & 1) {
1906 set_pstate_bits(PSTATE_TCO);
1907 } else {
1908 clear_pstate_bits(PSTATE_TCO);
1909 }
1910 gen_rebuild_hflags(s);
1911 /* Many factors, including TCO, go into MTE_ACTIVE. */
1912 s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
1913 } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
1914 /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */
1915 s->base.is_jmp = DISAS_NEXT;
1916 } else {
1917 goto do_unallocated;
1918 }
1919 break;
1920
1921 case 0x1b: /* SVCR* */
1922 if (!dc_isar_feature(aa64_sme, s) || crm < 2 || crm > 7) {
1923 goto do_unallocated;
1924 }
1925 if (sme_access_check(s)) {
1926 int old = s->pstate_sm | (s->pstate_za << 1);
1927 int new = (crm & 1) * 3; /* CRm<0> is the value to write */
1928 int msk = (crm >> 1) & 3; /* CRm<2:1> selects SM and/or ZA */
1929
1930 if ((old ^ new) & msk) {
1931 /* At least one bit changes. */
1932 gen_helper_set_svcr(cpu_env, tcg_constant_i32(new),
1933 tcg_constant_i32(msk));
1934 } else {
1935 s->base.is_jmp = DISAS_NEXT;
1936 }
1937 }
1938 break;
1939
1940 default:
1941 do_unallocated:
1942 unallocated_encoding(s);
1943 return;
1944 }
1945 }
1946
1947 static void gen_get_nzcv(TCGv_i64 tcg_rt)
1948 {
1949 TCGv_i32 tmp = tcg_temp_new_i32();
1950 TCGv_i32 nzcv = tcg_temp_new_i32();
1951
1952 /* build bit 31, N */
1953 tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
1954 /* build bit 30, Z */
1955 tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
1956 tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
1957 /* build bit 29, C */
1958 tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
1959 /* build bit 28, V */
1960 tcg_gen_shri_i32(tmp, cpu_VF, 31);
1961 tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
1962 /* generate result */
1963 tcg_gen_extu_i32_i64(tcg_rt, nzcv);
1964 }
1965
1966 static void gen_set_nzcv(TCGv_i64 tcg_rt)
1967 {
1968 TCGv_i32 nzcv = tcg_temp_new_i32();
1969
1970 /* take NZCV from R[t] */
1971 tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
1972
1973 /* bit 31, N */
1974 tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
1975 /* bit 30, Z */
1976 tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
1977 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
1978 /* bit 29, C */
1979 tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
1980 tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
1981 /* bit 28, V */
1982 tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
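/* cpu_VF holds V in bit 31, so shift the extracted bit up from bit 28. */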
1983 tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
1984 }
1985
1986 static void gen_sysreg_undef(DisasContext *s, bool isread,
1987 uint8_t op0, uint8_t op1, uint8_t op2,
1988 uint8_t crn, uint8_t crm, uint8_t rt)
1989 {
1990 /*
1991 * Generate code to emit an UNDEF with correct syndrome
1992 * information for a failed system register access.
1993 * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
1994 * but if FEAT_IDST is implemented then read accesses to registers
1995 * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
1996 * syndrome.
1997 */
1998 uint32_t syndrome;
1999
2000 if (isread && dc_isar_feature(aa64_ids, s) &&
2001 arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
2002 syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
2003 } else {
2004 syndrome = syn_uncategorized();
2005 }
2006 gen_exception_insn(s, 0, EXCP_UDEF, syndrome);
2007 }
2008
2009 /* MRS - move from system register
2010 * MSR (register) - move to system register
2011 * SYS
2012 * SYSL
2013 * These are all essentially the same insn in 'read' and 'write'
2014 * versions, with varying op0 fields.
2015 */
2016 static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
2017 unsigned int op0, unsigned int op1, unsigned int op2,
2018 unsigned int crn, unsigned int crm, unsigned int rt)
2019 {
2020 uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
2021 crn, crm, op0, op1, op2);
2022 const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
2023 bool need_exit_tb = false;
2024 TCGv_ptr tcg_ri = NULL;
2025 TCGv_i64 tcg_rt;
2026
2027 if (!ri) {
2028 /* Unknown register; this might be a guest error or a QEMU
2029 * unimplemented feature.
2030 */
2031 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
2032 "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
2033 isread ? "read" : "write", op0, op1, crn, crm, op2);
2034 gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
2035 return;
2036 }
2037
2038 /* Check access permissions */
2039 if (!cp_access_ok(s->current_el, ri, isread)) {
2040 gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
2041 return;
2042 }
2043
2044 if (ri->accessfn || (ri->fgt && s->fgt_active)) {
2045 /* Emit code to perform further access permissions checks at
2046 * runtime; this may result in an exception.
2047 */
2048 uint32_t syndrome;
2049
2050 syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
2051 gen_a64_update_pc(s, 0);
2052 tcg_ri = tcg_temp_new_ptr();
2053 gen_helper_access_check_cp_reg(tcg_ri, cpu_env,
2054 tcg_constant_i32(key),
2055 tcg_constant_i32(syndrome),
2056 tcg_constant_i32(isread));
2057 } else if (ri->type & ARM_CP_RAISES_EXC) {
2058 /*
2059 * The readfn or writefn might raise an exception;
2060 * synchronize the CPU state in case it does.
2061 */
2062 gen_a64_update_pc(s, 0);
2063 }
2064
2065 /* Handle special cases first */
2066 switch (ri->type & ARM_CP_SPECIAL_MASK) {
2067 case 0:
2068 break;
2069 case ARM_CP_NOP:
2070 return;
2071 case ARM_CP_NZCV:
2072 tcg_rt = cpu_reg(s, rt);
2073 if (isread) {
2074 gen_get_nzcv(tcg_rt);
2075 } else {
2076 gen_set_nzcv(tcg_rt);
2077 }
2078 return;
2079 case ARM_CP_CURRENTEL:
2080 /* Reads as current EL value from pstate, which is
2081 * guaranteed to be constant by the tb flags.
2082 */
2083 tcg_rt = cpu_reg(s, rt);
2084 tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
2085 return;
2086 case ARM_CP_DC_ZVA:
2087 /* Writes clear the aligned block of memory which rt points into. */
2088 if (s->mte_active[0]) {
2089 int desc = 0;
2090
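/* Build the MTE check descriptor: memory index plus the TBI and TCMA state. */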
2091 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
2092 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
2093 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
2094
2095 tcg_rt = tcg_temp_new_i64();
2096 gen_helper_mte_check_zva(tcg_rt, cpu_env,
2097 tcg_constant_i32(desc), cpu_reg(s, rt));
2098 } else {
2099 tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
2100 }
2101 gen_helper_dc_zva(cpu_env, tcg_rt);
2102 return;
2103 case ARM_CP_DC_GVA:
2104 {
2105 TCGv_i64 clean_addr, tag;
2106
2107 /*
2108 * DC_GVA, like DC_ZVA, requires that we supply the original
2109 * pointer for an invalid page. Probe that address first.
2110 */
2111 tcg_rt = cpu_reg(s, rt);
2112 clean_addr = clean_data_tbi(s, tcg_rt);
2113 gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);
2114
2115 if (s->ata) {
2116 /* Extract the tag from the register to match STZGM. */
2117 tag = tcg_temp_new_i64();
2118 tcg_gen_shri_i64(tag, tcg_rt, 56);
2119 gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
2120 }
2121 }
2122 return;
2123 case ARM_CP_DC_GZVA:
2124 {
2125 TCGv_i64 clean_addr, tag;
2126
2127 /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
2128 tcg_rt = cpu_reg(s, rt);
2129 clean_addr = clean_data_tbi(s, tcg_rt);
2130 gen_helper_dc_zva(cpu_env, clean_addr);
2131
2132 if (s->ata) {
2133 /* Extract the tag from the register to match STZGM. */
2134 tag = tcg_temp_new_i64();
2135 tcg_gen_shri_i64(tag, tcg_rt, 56);
2136 gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
2137 }
2138 }
2139 return;
2140 default:
2141 g_assert_not_reached();
2142 }
2143 if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
2144 return;
2145 } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
2146 return;
2147 } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
2148 return;
2149 }
2150
2151 if (ri->type & ARM_CP_IO) {
2152 /* I/O operations must end the TB here (whether read or write) */
2153 need_exit_tb = translator_io_start(&s->base);
2154 }
2155
2156 tcg_rt = cpu_reg(s, rt);
2157
2158 if (isread) {
2159 if (ri->type & ARM_CP_CONST) {
2160 tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
2161 } else if (ri->readfn) {
2162 if (!tcg_ri) {
2163 tcg_ri = gen_lookup_cp_reg(key);
2164 }
2165 gen_helper_get_cp_reg64(tcg_rt, cpu_env, tcg_ri);
2166 } else {
2167 tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
2168 }
2169 } else {
2170 if (ri->type & ARM_CP_CONST) {
2171 /* If not forbidden by access permissions, treat as WI */
2172 return;
2173 } else if (ri->writefn) {
2174 if (!tcg_ri) {
2175 tcg_ri = gen_lookup_cp_reg(key);
2176 }
2177 gen_helper_set_cp_reg64(cpu_env, tcg_ri, tcg_rt);
2178 } else {
2179 tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
2180 }
2181 }
2182
2183 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
2184 /*
2185 * A write to any coprocessor register that ends a TB
2186 * must rebuild the hflags for the next TB.
2187 */
2188 gen_rebuild_hflags(s);
2189 /*
2190 * We default to ending the TB on a coprocessor register write,
2191 * but allow this to be suppressed by the register definition
2192 * (usually only necessary to work around guest bugs).
2193 */
2194 need_exit_tb = true;
2195 }
2196 if (need_exit_tb) {
2197 s->base.is_jmp = DISAS_UPDATE_EXIT;
2198 }
2199 }
2200
2201 /* System
2202 * 31 22 21 20 19 18 16 15 12 11 8 7 5 4 0
2203 * +---------------------+---+-----+-----+-------+-------+-----+------+
2204 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 | CRn | CRm | op2 | Rt |
2205 * +---------------------+---+-----+-----+-------+-------+-----+------+
2206 */
2207 static void disas_system(DisasContext *s, uint32_t insn)
2208 {
2209 unsigned int l, op0, op1, crn, crm, op2, rt;
2210 l = extract32(insn, 21, 1);
2211 op0 = extract32(insn, 19, 2);
2212 op1 = extract32(insn, 16, 3);
2213 crn = extract32(insn, 12, 4);
2214 crm = extract32(insn, 8, 4);
2215 op2 = extract32(insn, 5, 3);
2216 rt = extract32(insn, 0, 5);
2217
2218 if (op0 == 0) {
2219 if (l || rt != 31) {
2220 unallocated_encoding(s);
2221 return;
2222 }
2223 switch (crn) {
2224 case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
2225 handle_hint(s, insn, op1, op2, crm);
2226 break;
2227 case 3: /* CLREX, DSB, DMB, ISB */
2228 handle_sync(s, insn, op1, op2, crm);
2229 break;
2230 case 4: /* MSR (immediate) */
2231 handle_msr_i(s, insn, op1, op2, crm);
2232 break;
2233 default:
2234 unallocated_encoding(s);
2235 break;
2236 }
2237 return;
2238 }
2239 handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
2240 }
2241
2242 /* Exception generation
2243 *
2244 * 31 24 23 21 20 5 4 2 1 0
2245 * +-----------------+-----+------------------------+-----+----+
2246 * | 1 1 0 1 0 1 0 0 | opc | imm16 | op2 | LL |
2247 * +-----------------+-----+------------------------+-----+----+
2248 */
2249 static void disas_exc(DisasContext *s, uint32_t insn)
2250 {
2251 int opc = extract32(insn, 21, 3);
2252 int op2_ll = extract32(insn, 0, 5);
2253 int imm16 = extract32(insn, 5, 16);
2254 uint32_t syndrome;
2255
2256 switch (opc) {
2257 case 0:
2258 /* For SVC, HVC and SMC we advance the single-step state
2259 * machine before taking the exception. This is architecturally
2260 * mandated, to ensure that single-stepping a system call
2261 * instruction works properly.
2262 */
2263 switch (op2_ll) {
2264 case 1: /* SVC */
2265 syndrome = syn_aa64_svc(imm16);
2266 if (s->fgt_svc) {
2267 gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
2268 break;
2269 }
2270 gen_ss_advance(s);
2271 gen_exception_insn(s, 4, EXCP_SWI, syndrome);
2272 break;
2273 case 2: /* HVC */
2274 if (s->current_el == 0) {
2275 unallocated_encoding(s);
2276 break;
2277 }
2278 /* The pre HVC helper handles cases when HVC gets trapped
2279 * as an undefined insn by runtime configuration.
2280 */
2281 gen_a64_update_pc(s, 0);
2282 gen_helper_pre_hvc(cpu_env);
2283 gen_ss_advance(s);
2284 gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(imm16), 2);
2285 break;
2286 case 3: /* SMC */
2287 if (s->current_el == 0) {
2288 unallocated_encoding(s);
2289 break;
2290 }
2291 gen_a64_update_pc(s, 0);
2292 gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(imm16)));
2293 gen_ss_advance(s);
2294 gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(imm16), 3);
2295 break;
2296 default:
2297 unallocated_encoding(s);
2298 break;
2299 }
2300 break;
2301 case 1:
2302 if (op2_ll != 0) {
2303 unallocated_encoding(s);
2304 break;
2305 }
2306 /* BRK */
2307 gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
2308 break;
2309 case 2:
2310 if (op2_ll != 0) {
2311 unallocated_encoding(s);
2312 break;
2313 }
2314 /* HLT. This has two purposes.
2315 * Architecturally, it is an external halting debug instruction.
2316 * Since QEMU doesn't implement external debug, we treat it as
2317 * required when halting debug is disabled: it will UNDEF.
2318 * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
2319 */
2320 if (semihosting_enabled(s->current_el == 0) && imm16 == 0xf000) {
2321 gen_exception_internal_insn(s, EXCP_SEMIHOST);
2322 } else {
2323 unallocated_encoding(s);
2324 }
2325 break;
2326 case 5:
2327 if (op2_ll < 1 || op2_ll > 3) {
2328 unallocated_encoding(s);
2329 break;
2330 }
2331 /* DCPS1, DCPS2, DCPS3 */
2332 unallocated_encoding(s);
2333 break;
2334 default:
2335 unallocated_encoding(s);
2336 break;
2337 }
2338 }
2339
2340 /* Branches, exception generating and system instructions */
2341 static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
2342 {
2343 switch (extract32(insn, 25, 7)) {
2344 case 0x6a: /* Exception generation / System */
2345 if (insn & (1 << 24)) {
2346 if (extract32(insn, 22, 2) == 0) {
2347 disas_system(s, insn);
2348 } else {
2349 unallocated_encoding(s);
2350 }
2351 } else {
2352 disas_exc(s, insn);
2353 }
2354 break;
2355 default:
2356 unallocated_encoding(s);
2357 break;
2358 }
2359 }
2360
2361 /*
2362 * Load/Store exclusive instructions are implemented by remembering
2363 * the value/address loaded, and seeing if these are the same
2364 * when the store is performed. This is not actually the architecturally
2365 * mandated semantics, but it works for typical guest code sequences
2366 * and avoids having to monitor regular stores.
2367 *
2368 * The store exclusive uses the atomic cmpxchg primitives to avoid
2369 * races in multi-threaded linux-user and when MTTCG softmmu is
2370 * enabled.
2371 */
2372 static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn,
2373 int size, bool is_pair)
2374 {
2375 int idx = get_mem_index(s);
2376 MemOp memop;
2377 TCGv_i64 dirty_addr, clean_addr;
2378
2379 s->is_ldex = true;
2380 dirty_addr = cpu_reg_sp(s, rn);
2381 clean_addr = gen_mte_check1(s, dirty_addr, false, rn != 31, size);
2382
2383 g_assert(size <= 3);
2384 if (is_pair) {
2385 g_assert(size >= 2);
2386 if (size == 2) {
2387 /* The pair must be single-copy atomic for the doubleword. */
2388 memop = finalize_memop(s, MO_64 | MO_ALIGN);
2389 tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
2390 if (s->be_data == MO_LE) {
2391 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
2392 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
2393 } else {
2394 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
2395 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
2396 }
2397 } else {
2398 /*
2399 * The pair must be single-copy atomic for *each* doubleword, not
2400 * the entire quadword; however, it must be quadword aligned.
2401 * Expose the complete load to tcg, for ease of tlb lookup,
2402 * but indicate that only 8-byte atomicity is required.
2403 */
2404 TCGv_i128 t16 = tcg_temp_new_i128();
2405
2406 memop = finalize_memop_atom(s, MO_128 | MO_ALIGN_16,
2407 MO_ATOM_IFALIGN_PAIR);
2408 tcg_gen_qemu_ld_i128(t16, clean_addr, idx, memop);
2409
2410 if (s->be_data == MO_LE) {
2411 tcg_gen_extr_i128_i64(cpu_exclusive_val,
2412 cpu_exclusive_high, t16);
2413 } else {
2414 tcg_gen_extr_i128_i64(cpu_exclusive_high,
2415 cpu_exclusive_val, t16);
2416 }
2417 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2418 tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
2419 }
2420 } else {
2421 memop = finalize_memop(s, size | MO_ALIGN);
2422 tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
2423 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2424 }
2425 tcg_gen_mov_i64(cpu_exclusive_addr, clean_addr);
2426 }
2427
2428 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
2429 int rn, int size, int is_pair)
2430 {
2431 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
2432 * && (!is_pair || env->exclusive_high == [addr + datasize])) {
2433 * [addr] = {Rt};
2434 * if (is_pair) {
2435 * [addr + datasize] = {Rt2};
2436 * }
2437 * {Rd} = 0;
2438 * } else {
2439 * {Rd} = 1;
2440 * }
2441 * env->exclusive_addr = -1;
2442 */
2443 TCGLabel *fail_label = gen_new_label();
2444 TCGLabel *done_label = gen_new_label();
2445 TCGv_i64 tmp, dirty_addr, clean_addr;
2446
2447 dirty_addr = cpu_reg_sp(s, rn);
2448 clean_addr = gen_mte_check1(s, dirty_addr, true, rn != 31, size);
2449
2450 tcg_gen_brcond_i64(TCG_COND_NE, clean_addr, cpu_exclusive_addr, fail_label);
2451
2452 tmp = tcg_temp_new_i64();
2453 if (is_pair) {
2454 if (size == 2) {
2455 if (s->be_data == MO_LE) {
2456 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
2457 } else {
2458 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
2459 }
2460 tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
2461 cpu_exclusive_val, tmp,
2462 get_mem_index(s),
2463 MO_64 | MO_ALIGN | s->be_data);
2464 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2465 } else {
2466 TCGv_i128 t16 = tcg_temp_new_i128();
2467 TCGv_i128 c16 = tcg_temp_new_i128();
2468 TCGv_i64 a, b;
2469
2470 if (s->be_data == MO_LE) {
2471 tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt), cpu_reg(s, rt2));
2472 tcg_gen_concat_i64_i128(c16, cpu_exclusive_val,
2473 cpu_exclusive_high);
2474 } else {
2475 tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt2), cpu_reg(s, rt));
2476 tcg_gen_concat_i64_i128(c16, cpu_exclusive_high,
2477 cpu_exclusive_val);
2478 }
2479
2480 tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16,
2481 get_mem_index(s),
2482 MO_128 | MO_ALIGN | s->be_data);
2483
2484 a = tcg_temp_new_i64();
2485 b = tcg_temp_new_i64();
2486 if (s->be_data == MO_LE) {
2487 tcg_gen_extr_i128_i64(a, b, t16);
2488 } else {
2489 tcg_gen_extr_i128_i64(b, a, t16);
2490 }
2491
2492 tcg_gen_xor_i64(a, a, cpu_exclusive_val);
2493 tcg_gen_xor_i64(b, b, cpu_exclusive_high);
2494 tcg_gen_or_i64(tmp, a, b);
2495
2496 tcg_gen_setcondi_i64(TCG_COND_NE, tmp, tmp, 0);
2497 }
2498 } else {
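/*
 * cmpxchg returns the previous memory value; the store fails (Rd = 1)
 * unless that matches the value seen by the exclusive load.
 */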
2499 tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
2500 cpu_reg(s, rt), get_mem_index(s),
2501 size | MO_ALIGN | s->be_data);
2502 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2503 }
2504 tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
2505 tcg_gen_br(done_label);
2506
2507 gen_set_label(fail_label);
2508 tcg_gen_movi_i64(cpu_reg(s, rd), 1);
2509 gen_set_label(done_label);
2510 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
2511 }
2512
2513 static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
2514 int rn, int size)
2515 {
2516 TCGv_i64 tcg_rs = cpu_reg(s, rs);
2517 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2518 int memidx = get_mem_index(s);
2519 TCGv_i64 clean_addr;
2520
2521 if (rn == 31) {
2522 gen_check_sp_alignment(s);
2523 }
2524 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
2525 tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
2526 size | MO_ALIGN | s->be_data);
2527 }
2528
2529 static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
2530 int rn, int size)
2531 {
2532 TCGv_i64 s1 = cpu_reg(s, rs);
2533 TCGv_i64 s2 = cpu_reg(s, rs + 1);
2534 TCGv_i64 t1 = cpu_reg(s, rt);
2535 TCGv_i64 t2 = cpu_reg(s, rt + 1);
2536 TCGv_i64 clean_addr;
2537 int memidx = get_mem_index(s);
2538
2539 if (rn == 31) {
2540 gen_check_sp_alignment(s);
2541 }
2542
2543 /* This is a single atomic access, despite the "pair". */
2544 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1);
2545
2546 if (size == 2) {
2547 TCGv_i64 cmp = tcg_temp_new_i64();
2548 TCGv_i64 val = tcg_temp_new_i64();
2549
2550 if (s->be_data == MO_LE) {
2551 tcg_gen_concat32_i64(val, t1, t2);
2552 tcg_gen_concat32_i64(cmp, s1, s2);
2553 } else {
2554 tcg_gen_concat32_i64(val, t2, t1);
2555 tcg_gen_concat32_i64(cmp, s2, s1);
2556 }
2557
2558 tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
2559 MO_64 | MO_ALIGN | s->be_data);
2560
2561 if (s->be_data == MO_LE) {
2562 tcg_gen_extr32_i64(s1, s2, cmp);
2563 } else {
2564 tcg_gen_extr32_i64(s2, s1, cmp);
2565 }
2566 } else {
2567 TCGv_i128 cmp = tcg_temp_new_i128();
2568 TCGv_i128 val = tcg_temp_new_i128();
2569
2570 if (s->be_data == MO_LE) {
2571 tcg_gen_concat_i64_i128(val, t1, t2);
2572 tcg_gen_concat_i64_i128(cmp, s1, s2);
2573 } else {
2574 tcg_gen_concat_i64_i128(val, t2, t1);
2575 tcg_gen_concat_i64_i128(cmp, s2, s1);
2576 }
2577
2578 tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx,
2579 MO_128 | MO_ALIGN | s->be_data);
2580
2581 if (s->be_data == MO_LE) {
2582 tcg_gen_extr_i128_i64(s1, s2, cmp);
2583 } else {
2584 tcg_gen_extr_i128_i64(s2, s1, cmp);
2585 }
2586 }
2587 }
2588
2589 /* Compute the Sixty-Four bit (SF) register size. This logic is derived
2590 * from the ARMv8 specs for LDR (Shared decode for all encodings).
2591 */
2592 static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
2593 {
2594 int opc0 = extract32(opc, 0, 1);
2595 int regsize;
2596
2597 if (is_signed) {
2598 regsize = opc0 ? 32 : 64;
2599 } else {
2600 regsize = size == 3 ? 64 : 32;
2601 }
2602 return regsize == 64;
2603 }
2604
2605 /* Load/store exclusive
2606 *
2607 * 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
2608 * +-----+-------------+----+---+----+------+----+-------+------+------+
2609 * | sz | 0 0 1 0 0 0 | o2 | L | o1 | Rs | o0 | Rt2 | Rn | Rt |
2610 * +-----+-------------+----+---+----+------+----+-------+------+------+
2611 *
2612 * sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2613 * L: 0 -> store, 1 -> load
2614 * o2: 0 -> exclusive, 1 -> not
2615 * o1: 0 -> single register, 1 -> register pair
2616 * o0: 1 -> load-acquire/store-release, 0 -> not
2617 */
2618 static void disas_ldst_excl(DisasContext *s, uint32_t insn)
2619 {
2620 int rt = extract32(insn, 0, 5);
2621 int rn = extract32(insn, 5, 5);
2622 int rt2 = extract32(insn, 10, 5);
2623 int rs = extract32(insn, 16, 5);
2624 int is_lasr = extract32(insn, 15, 1);
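/* Pack the o2:L:o1:o0 fields (insn bits 23:21 and bit 15) into a 4-bit selector. */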
2625 int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
2626 int size = extract32(insn, 30, 2);
2627 TCGv_i64 clean_addr;
2628
2629 switch (o2_L_o1_o0) {
2630 case 0x0: /* STXR */
2631 case 0x1: /* STLXR */
2632 if (rn == 31) {
2633 gen_check_sp_alignment(s);
2634 }
2635 if (is_lasr) {
2636 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2637 }
2638 gen_store_exclusive(s, rs, rt, rt2, rn, size, false);
2639 return;
2640
2641 case 0x4: /* LDXR */
2642 case 0x5: /* LDAXR */
2643 if (rn == 31) {
2644 gen_check_sp_alignment(s);
2645 }
2646 gen_load_exclusive(s, rt, rt2, rn, size, false);
2647 if (is_lasr) {
2648 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2649 }
2650 return;
2651
2652 case 0x8: /* STLLR */
2653 if (!dc_isar_feature(aa64_lor, s)) {
2654 break;
2655 }
2656 /* StoreLORelease is the same as Store-Release for QEMU. */
2657 /* fall through */
2658 case 0x9: /* STLR */
2659 /* Generate ISS for non-exclusive accesses including LASR. */
2660 if (rn == 31) {
2661 gen_check_sp_alignment(s);
2662 }
2663 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2664 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2665 true, rn != 31, size);
2666 /* TODO: ARMv8.4-LSE SCTLR.nAA */
2667 do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
2668 disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2669 return;
2670
2671 case 0xc: /* LDLAR */
2672 if (!dc_isar_feature(aa64_lor, s)) {
2673 break;
2674 }
2675 /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
2676 /* fall through */
2677 case 0xd: /* LDAR */
2678 /* Generate ISS for non-exclusive accesses including LASR. */
2679 if (rn == 31) {
2680 gen_check_sp_alignment(s);
2681 }
2682 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2683 false, rn != 31, size);
2684 /* TODO: ARMv8.4-LSE SCTLR.nAA */
2685 do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
2686 rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2687 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2688 return;
2689
2690 case 0x2: case 0x3: /* CASP / STXP */
2691 if (size & 2) { /* STXP / STLXP */
2692 if (rn == 31) {
2693 gen_check_sp_alignment(s);
2694 }
2695 if (is_lasr) {
2696 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2697 }
2698 gen_store_exclusive(s, rs, rt, rt2, rn, size, true);
2699 return;
2700 }
2701 if (rt2 == 31
2702 && ((rt | rs) & 1) == 0
2703 && dc_isar_feature(aa64_atomics, s)) {
2704 /* CASP / CASPL */
2705 gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2706 return;
2707 }
2708 break;
2709
2710 case 0x6: case 0x7: /* CASPA / LDXP */
2711 if (size & 2) { /* LDXP / LDAXP */
2712 if (rn == 31) {
2713 gen_check_sp_alignment(s);
2714 }
2715 gen_load_exclusive(s, rt, rt2, rn, size, true);
2716 if (is_lasr) {
2717 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2718 }
2719 return;
2720 }
2721 if (rt2 == 31
2722 && ((rt | rs) & 1) == 0
2723 && dc_isar_feature(aa64_atomics, s)) {
2724 /* CASPA / CASPAL */
2725 gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2726 return;
2727 }
2728 break;
2729
2730 case 0xa: /* CAS */
2731 case 0xb: /* CASL */
2732 case 0xe: /* CASA */
2733 case 0xf: /* CASAL */
2734 if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
2735 gen_compare_and_swap(s, rs, rt, rn, size);
2736 return;
2737 }
2738 break;
2739 }
2740 unallocated_encoding(s);
2741 }
2742
2743 /*
2744 * Load register (literal)
2745 *
2746 * 31 30 29 27 26 25 24 23 5 4 0
2747 * +-----+-------+---+-----+-------------------+-------+
2748 * | opc | 0 1 1 | V | 0 0 | imm19 | Rt |
2749 * +-----+-------+---+-----+-------------------+-------+
2750 *
2751 * V: 1 -> vector (simd/fp)
2752 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
2753 * 10 -> 32 bit signed, 11 -> prefetch
2754 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
2755 */
2756 static void disas_ld_lit(DisasContext *s, uint32_t insn)
2757 {
2758 int rt = extract32(insn, 0, 5);
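/* imm19 is a signed word offset from this instruction's PC; scale it to bytes. */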
2759 int64_t imm = sextract32(insn, 5, 19) << 2;
2760 bool is_vector = extract32(insn, 26, 1);
2761 int opc = extract32(insn, 30, 2);
2762 bool is_signed = false;
2763 int size = 2;
2764 TCGv_i64 tcg_rt, clean_addr;
2765
2766 if (is_vector) {
2767 if (opc == 3) {
2768 unallocated_encoding(s);
2769 return;
2770 }
2771 size = 2 + opc;
2772 if (!fp_access_check(s)) {
2773 return;
2774 }
2775 } else {
2776 if (opc == 3) {
2777 /* PRFM (literal) : prefetch */
2778 return;
2779 }
2780 size = 2 + extract32(opc, 0, 1);
2781 is_signed = extract32(opc, 1, 1);
2782 }
2783
2784 tcg_rt = cpu_reg(s, rt);
2785
2786 clean_addr = tcg_temp_new_i64();
2787 gen_pc_plus_diff(s, clean_addr, imm);
2788 if (is_vector) {
2789 do_fp_ld(s, rt, clean_addr, size);
2790 } else {
2791 /* Only unsigned 32bit loads target 32bit registers. */
2792 bool iss_sf = opc != 0;
2793
2794 do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
2795 false, true, rt, iss_sf, false);
2796 }
2797 }
2798
2799 /*
2800 * LDNP (Load Pair - non-temporal hint)
2801 * LDP (Load Pair - non vector)
2802 * LDPSW (Load Pair Signed Word - non vector)
2803 * STNP (Store Pair - non-temporal hint)
2804 * STP (Store Pair - non vector)
2805 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
2806 * LDP (Load Pair of SIMD&FP)
2807 * STNP (Store Pair of SIMD&FP - non-temporal hint)
2808 * STP (Store Pair of SIMD&FP)
2809 *
2810 * 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0
2811 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2812 * | opc | 1 0 1 | V | 0 | index | L | imm7 | Rt2 | Rn | Rt |
2813 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2814 *
2815 * opc: LDP/STP/LDNP/STNP 00 -> 32 bit, 10 -> 64 bit
2816 * LDPSW/STGP 01
2817 * LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
2818 * V: 0 -> GPR, 1 -> Vector
2819 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
2820 * 10 -> signed offset, 11 -> pre-index
2821 * L: 0 -> Store 1 -> Load
2822 *
2823 * Rt, Rt2 = GPR or SIMD registers to be stored
2824 * Rn = general purpose register containing address
2825 * imm7 = signed offset (multiple of 4 or 8 depending on size)
2826 */
2827 static void disas_ldst_pair(DisasContext *s, uint32_t insn)
2828 {
2829 int rt = extract32(insn, 0, 5);
2830 int rn = extract32(insn, 5, 5);
2831 int rt2 = extract32(insn, 10, 5);
2832 uint64_t offset = sextract64(insn, 15, 7);
2833 int index = extract32(insn, 23, 2);
2834 bool is_vector = extract32(insn, 26, 1);
2835 bool is_load = extract32(insn, 22, 1);
2836 int opc = extract32(insn, 30, 2);
2837
2838 bool is_signed = false;
2839 bool postindex = false;
2840 bool wback = false;
2841 bool set_tag = false;
2842
2843 TCGv_i64 clean_addr, dirty_addr;
2844
2845 int size;
2846
2847 if (opc == 3) {
2848 unallocated_encoding(s);
2849 return;
2850 }
2851
2852 if (is_vector) {
2853 size = 2 + opc;
2854 } else if (opc == 1 && !is_load) {
2855 /* STGP */
2856 if (!dc_isar_feature(aa64_mte_insn_reg, s) || index == 0) {
2857 unallocated_encoding(s);
2858 return;
2859 }
2860 size = 3;
2861 set_tag = true;
2862 } else {
2863 size = 2 + extract32(opc, 1, 1);
2864 is_signed = extract32(opc, 0, 1);
2865 if (!is_load && is_signed) {
2866 unallocated_encoding(s);
2867 return;
2868 }
2869 }
2870
2871 switch (index) {
2872 case 1: /* post-index */
2873 postindex = true;
2874 wback = true;
2875 break;
2876 case 0:
2877 /* Signed offset with "non-temporal" hint. Since we don't emulate
2878 * caches, we don't care about hints to the cache system about
2879 * data access patterns, so we handle this identically to a plain
2880 * signed offset.
2881 */
2882 if (is_signed) {
2883 /* There is no non-temporal-hint version of LDPSW */
2884 unallocated_encoding(s);
2885 return;
2886 }
2887 postindex = false;
2888 break;
2889 case 2: /* signed offset, rn not updated */
2890 postindex = false;
2891 break;
2892 case 3: /* pre-index */
2893 postindex = false;
2894 wback = true;
2895 break;
2896 }
2897
2898 if (is_vector && !fp_access_check(s)) {
2899 return;
2900 }
2901
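/* imm7 is scaled by the transfer size, or by the tag granule for STGP. */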
2902 offset <<= (set_tag ? LOG2_TAG_GRANULE : size);
2903
2904 if (rn == 31) {
2905 gen_check_sp_alignment(s);
2906 }
2907
2908 dirty_addr = read_cpu_reg_sp(s, rn, 1);
2909 if (!postindex) {
2910 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
2911 }
2912
2913 if (set_tag) {
2914 if (!s->ata) {
2915 /*
2916 * TODO: We could rely on the stores below, at least for
2917 * system mode, if we arrange to add MO_ALIGN_16.
2918 */
2919 gen_helper_stg_stub(cpu_env, dirty_addr);
2920 } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2921 gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
2922 } else {
2923 gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
2924 }
2925 }
2926
2927 clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
2928 (wback || rn != 31) && !set_tag, 2 << size);
2929
2930 if (is_vector) {
2931 if (is_load) {
2932 do_fp_ld(s, rt, clean_addr, size);
2933 } else {
2934 do_fp_st(s, rt, clean_addr, size);
2935 }
2936 tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
2937 if (is_load) {
2938 do_fp_ld(s, rt2, clean_addr, size);
2939 } else {
2940 do_fp_st(s, rt2, clean_addr, size);
2941 }
2942 } else {
2943 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2944 TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
2945
2946 if (is_load) {
2947 TCGv_i64 tmp = tcg_temp_new_i64();
2948
2949 /* Do not modify tcg_rt before recognizing any exception
2950 * from the second load.
2951 */
2952 do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
2953 false, false, 0, false, false);
2954 tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
2955 do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
2956 false, false, 0, false, false);
2957
2958 tcg_gen_mov_i64(tcg_rt, tmp);
2959 } else {
2960 do_gpr_st(s, tcg_rt, clean_addr, size,
2961 false, 0, false, false);
2962 tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
2963 do_gpr_st(s, tcg_rt2, clean_addr, size,
2964 false, 0, false, false);
2965 }
2966 }
2967
2968 if (wback) {
2969 if (postindex) {
2970 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
2971 }
2972 tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
2973 }
2974 }
2975
2976 /*
2977 * Load/store (immediate post-indexed)
2978 * Load/store (immediate pre-indexed)
2979 * Load/store (unscaled immediate)
2980 *
2981 * 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0
2982 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2983 * |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt |
2984 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2985 *
2986 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
2987 * 10 -> unprivileged
2988 * V = 0 -> non-vector
2989 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
2990 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2991 */
2992 static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
2993 int opc,
2994 int size,
2995 int rt,
2996 bool is_vector)
2997 {
2998 int rn = extract32(insn, 5, 5);
2999 int imm9 = sextract32(insn, 12, 9);
3000 int idx = extract32(insn, 10, 2);
3001 bool is_signed = false;
3002 bool is_store = false;
3003 bool is_extended = false;
3004 bool is_unpriv = (idx == 2);
3005 bool iss_valid;
3006 bool post_index;
3007 bool writeback;
3008 int memidx;
3009
3010 TCGv_i64 clean_addr, dirty_addr;
3011
3012 if (is_vector) {
3013 size |= (opc & 2) << 1;
3014 if (size > 4 || is_unpriv) {
3015 unallocated_encoding(s);
3016 return;
3017 }
3018 is_store = ((opc & 1) == 0);
3019 if (!fp_access_check(s)) {
3020 return;
3021 }
3022 } else {
3023 if (size == 3 && opc == 2) {
3024 /* PRFM - prefetch */
3025 if (idx != 0) {
3026 unallocated_encoding(s);
3027 return;
3028 }
3029 return;
3030 }
3031 if (opc == 3 && size > 1) {
3032 unallocated_encoding(s);
3033 return;
3034 }
3035 is_store = (opc == 0);
3036 is_signed = extract32(opc, 1, 1);
3037 is_extended = (size < 3) && extract32(opc, 0, 1);
3038 }
3039
3040 switch (idx) {
3041 case 0:
3042 case 2:
3043 post_index = false;
3044 writeback = false;
3045 break;
3046 case 1:
3047 post_index = true;
3048 writeback = true;
3049 break;
3050 case 3:
3051 post_index = false;
3052 writeback = true;
3053 break;
3054 default:
3055 g_assert_not_reached();
3056 }
3057
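/* ISS syndrome information is only reported for GPR accesses without writeback. */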
3058 iss_valid = !is_vector && !writeback;
3059
3060 if (rn == 31) {
3061 gen_check_sp_alignment(s);
3062 }
3063
3064 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3065 if (!post_index) {
3066 tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
3067 }
3068
3069 memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
3070 clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
3071 writeback || rn != 31,
3072 size, is_unpriv, memidx);
3073
3074 if (is_vector) {
3075 if (is_store) {
3076 do_fp_st(s, rt, clean_addr, size);
3077 } else {
3078 do_fp_ld(s, rt, clean_addr, size);
3079 }
3080 } else {
3081 TCGv_i64 tcg_rt = cpu_reg(s, rt);
3082 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3083
3084 if (is_store) {
3085 do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
3086 iss_valid, rt, iss_sf, false);
3087 } else {
3088 do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
3089 is_extended, memidx,
3090 iss_valid, rt, iss_sf, false);
3091 }
3092 }
3093
3094 if (writeback) {
3095 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
3096 if (post_index) {
3097 tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
3098 }
3099 tcg_gen_mov_i64(tcg_rn, dirty_addr);
3100 }
3101 }
3102
3103 /*
3104 * Load/store (register offset)
3105 *
3106 * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
3107 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
3108 * |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt |
3109 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
3110 *
3111 * For non-vector:
3112 * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
3113 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
3114 * For vector:
3115 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
3116 * opc<0>: 0 -> store, 1 -> load
3117 * V: 1 -> vector/simd
3118 * opt: extend encoding (see DecodeRegExtend)
3119 * S: if S=1 then scale (essentially index by sizeof(size))
3120 * Rt: register to transfer into/out of
3121 * Rn: address register or SP for base
3122 * Rm: offset register or ZR for offset
3123 */
3124 static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
3125 int opc,
3126 int size,
3127 int rt,
3128 bool is_vector)
3129 {
3130 int rn = extract32(insn, 5, 5);
3131 int shift = extract32(insn, 12, 1);
3132 int rm = extract32(insn, 16, 5);
3133 int opt = extract32(insn, 13, 3);
3134 bool is_signed = false;
3135 bool is_store = false;
3136 bool is_extended = false;
3137
3138 TCGv_i64 tcg_rm, clean_addr, dirty_addr;
3139
3140 if (extract32(opt, 1, 1) == 0) {
3141 unallocated_encoding(s);
3142 return;
3143 }
3144
3145 if (is_vector) {
3146 size |= (opc & 2) << 1;
3147 if (size > 4) {
3148 unallocated_encoding(s);
3149 return;
3150 }
3151 is_store = !extract32(opc, 0, 1);
3152 if (!fp_access_check(s)) {
3153 return;
3154 }
3155 } else {
3156 if (size == 3 && opc == 2) {
3157 /* PRFM - prefetch */
3158 return;
3159 }
3160 if (opc == 3 && size > 1) {
3161 unallocated_encoding(s);
3162 return;
3163 }
3164 is_store = (opc == 0);
3165 is_signed = extract32(opc, 1, 1);
3166 is_extended = (size < 3) && extract32(opc, 0, 1);
3167 }
3168
3169 if (rn == 31) {
3170 gen_check_sp_alignment(s);
3171 }
3172 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3173
3174 tcg_rm = read_cpu_reg(s, rm, 1);
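/* Extend Rm as selected by opt and, when S is set, scale it by the transfer size. */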
3175 ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
3176
3177 tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
3178 clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);
3179
3180 if (is_vector) {
3181 if (is_store) {
3182 do_fp_st(s, rt, clean_addr, size);
3183 } else {
3184 do_fp_ld(s, rt, clean_addr, size);
3185 }
3186 } else {
3187 TCGv_i64 tcg_rt = cpu_reg(s, rt);
3188 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3189 if (is_store) {
3190 do_gpr_st(s, tcg_rt, clean_addr, size,
3191 true, rt, iss_sf, false);
3192 } else {
3193 do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
3194 is_extended, true, rt, iss_sf, false);
3195 }
3196 }
3197 }
3198
3199 /*
3200 * Load/store (unsigned immediate)
3201 *
3202 * 31 30 29 27 26 25 24 23 22 21 10 9 5 4 0
3203 * +----+-------+---+-----+-----+------------+-------+------+
3204 * |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt |
3205 * +----+-------+---+-----+-----+------------+-------+------+
3206 *
3207 * For non-vector:
3208 * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
3209 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
3210 * For vector:
3211 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
3212 * opc<0>: 0 -> store, 1 -> load
3213 * Rn: base address register (inc SP)
3214 * Rt: target register
3215 */
3216 static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
3217 int opc,
3218 int size,
3219 int rt,
3220 bool is_vector)
3221 {
3222 int rn = extract32(insn, 5, 5);
3223 unsigned int imm12 = extract32(insn, 10, 12);
3224 unsigned int offset;
3225
3226 TCGv_i64 clean_addr, dirty_addr;
3227
3228 bool is_store;
3229 bool is_signed = false;
3230 bool is_extended = false;
3231
3232 if (is_vector) {
3233 size |= (opc & 2) << 1;
3234 if (size > 4) {
3235 unallocated_encoding(s);
3236 return;
3237 }
3238 is_store = !extract32(opc, 0, 1);
3239 if (!fp_access_check(s)) {
3240 return;
3241 }
3242 } else {
3243 if (size == 3 && opc == 2) {
3244 /* PRFM - prefetch */
3245 return;
3246 }
3247 if (opc == 3 && size > 1) {
3248 unallocated_encoding(s);
3249 return;
3250 }
3251 is_store = (opc == 0);
3252 is_signed = extract32(opc, 1, 1);
3253 is_extended = (size < 3) && extract32(opc, 0, 1);
3254 }
3255
3256 if (rn == 31) {
3257 gen_check_sp_alignment(s);
3258 }
3259 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3260 offset = imm12 << size;
3261 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3262 clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);
3263
3264 if (is_vector) {
3265 if (is_store) {
3266 do_fp_st(s, rt, clean_addr, size);
3267 } else {
3268 do_fp_ld(s, rt, clean_addr, size);
3269 }
3270 } else {
3271 TCGv_i64 tcg_rt = cpu_reg(s, rt);
3272 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3273 if (is_store) {
3274 do_gpr_st(s, tcg_rt, clean_addr, size,
3275 true, rt, iss_sf, false);
3276 } else {
3277 do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
3278 is_extended, true, rt, iss_sf, false);
3279 }
3280 }
3281 }
3282
3283 /* Atomic memory operations
3284 *
3285 * 31 30 27 26 24 22 21 16 15 12 10 5 0
3286 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
3287 * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn | Rt |
3288 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
3289 *
3290 * Rt: the result register
3291 * Rn: base address or SP
3292 * Rs: the source register for the operation
3293 * V: vector flag (always 0 as of v8.3)
3294 * A: acquire flag
3295 * R: release flag
3296 */
3297 static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
3298 int size, int rt, bool is_vector)
3299 {
3300 int rs = extract32(insn, 16, 5);
3301 int rn = extract32(insn, 5, 5);
3302 int o3_opc = extract32(insn, 12, 4);
3303 bool r = extract32(insn, 22, 1);
3304 bool a = extract32(insn, 23, 1);
3305 TCGv_i64 tcg_rs, tcg_rt, clean_addr;
3306 AtomicThreeOpFn *fn = NULL;
3307 MemOp mop = s->be_data | size | MO_ALIGN;
3308
3309 if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
3310 unallocated_encoding(s);
3311 return;
3312 }
3313 switch (o3_opc) {
3314 case 000: /* LDADD */
3315 fn = tcg_gen_atomic_fetch_add_i64;
3316 break;
3317 case 001: /* LDCLR */
3318 fn = tcg_gen_atomic_fetch_and_i64;
3319 break;
3320 case 002: /* LDEOR */
3321 fn = tcg_gen_atomic_fetch_xor_i64;
3322 break;
3323 case 003: /* LDSET */
3324 fn = tcg_gen_atomic_fetch_or_i64;
3325 break;
3326 case 004: /* LDSMAX */
3327 fn = tcg_gen_atomic_fetch_smax_i64;
3328 mop |= MO_SIGN;
3329 break;
3330 case 005: /* LDSMIN */
3331 fn = tcg_gen_atomic_fetch_smin_i64;
3332 mop |= MO_SIGN;
3333 break;
3334 case 006: /* LDUMAX */
3335 fn = tcg_gen_atomic_fetch_umax_i64;
3336 break;
3337 case 007: /* LDUMIN */
3338 fn = tcg_gen_atomic_fetch_umin_i64;
3339 break;
3340 case 010: /* SWP */
3341 fn = tcg_gen_atomic_xchg_i64;
3342 break;
3343 case 014: /* LDAPR, LDAPRH, LDAPRB */
3344 if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
3345 rs != 31 || a != 1 || r != 0) {
3346 unallocated_encoding(s);
3347 return;
3348 }
3349 break;
3350 default:
3351 unallocated_encoding(s);
3352 return;
3353 }
3354
3355 if (rn == 31) {
3356 gen_check_sp_alignment(s);
3357 }
3358 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);
3359
3360 if (o3_opc == 014) {
3361 /*
3362 * LDAPR* are a special case because they are a simple load, not a
3363 * fetch-and-do-something op.
3364 * The architectural consistency requirements here are weaker than
3365 * full load-acquire (we only need "load-acquire processor consistent"),
3366 * but we choose to implement them as full LDAQ.
3367 */
3368 do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
3369 true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
3370 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3371 return;
3372 }
3373
3374 tcg_rs = read_cpu_reg(s, rs, true);
3375 tcg_rt = cpu_reg(s, rt);
3376
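/* LDCLR clears the bits set in Rs, i.e. an atomic fetch-and-AND with ~Rs. */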
3377 if (o3_opc == 1) { /* LDCLR */
3378 tcg_gen_not_i64(tcg_rs, tcg_rs);
3379 }
3380
3381 /* The tcg atomic primitives are all full barriers. Therefore we
3382 * can ignore the Acquire and Release bits of this instruction.
3383 */
3384 fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);
3385
3386 if ((mop & MO_SIGN) && size != MO_64) {
3387 tcg_gen_extract_i64(tcg_rt, tcg_rt, 0, 8 << size); /* zero-extend to access size */
3388 }
3389 }
3390
3391 /*
3392 * PAC memory operations
3393 *
3394 * 31 30 27 26 24 22 21 12 11 10 5 0
3395 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
3396 * | size | 1 1 1 | V | 0 0 | M S | 1 | imm9 | W | 1 | Rn | Rt |
3397 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
3398 *
3399 * Rt: the result register
3400 * Rn: base address or SP
3401 * V: vector flag (always 0 as of v8.3)
3402 * M: clear for key DA, set for key DB
3403 * W: pre-indexing flag
3404 * S: sign for imm9.
3405 */
3406 static void disas_ldst_pac(DisasContext *s, uint32_t insn,
3407 int size, int rt, bool is_vector)
3408 {
3409 int rn = extract32(insn, 5, 5);
3410 bool is_wback = extract32(insn, 11, 1);
3411 bool use_key_a = !extract32(insn, 23, 1);
3412 int offset;
3413 TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3414
3415 if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
3416 unallocated_encoding(s);
3417 return;
3418 }
3419
3420 if (rn == 31) {
3421 gen_check_sp_alignment(s);
3422 }
3423 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3424
3425 if (s->pauth_active) {
3426 if (use_key_a) {
3427 gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
3428 tcg_constant_i64(0));
3429 } else {
3430 gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
3431 tcg_constant_i64(0));
3432 }
3433 }
3434
3435 /* Form the 10-bit signed, scaled offset. */
3436 offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
3437 offset = sextract32(offset << size, 0, 10 + size);
3438 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3439
3440 /* Note that "clean" and "dirty" here refer to TBI not PAC. */
3441 clean_addr = gen_mte_check1(s, dirty_addr, false,
3442 is_wback || rn != 31, size);
3443
3444 tcg_rt = cpu_reg(s, rt);
3445 do_gpr_ld(s, tcg_rt, clean_addr, size,
3446 /* extend */ false, /* iss_valid */ !is_wback,
3447 /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);
3448
3449 if (is_wback) {
3450 tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
3451 }
3452 }
3453
3454 /*
3455 * LDAPR/STLR (unscaled immediate)
3456 *
3457 * 31 30 24 22 21 12 10 5 0
3458 * +------+-------------+-----+---+--------+-----+----+-----+
3459 * | size | 0 1 1 0 0 1 | opc | 0 | imm9 | 0 0 | Rn | Rt |
3460 * +------+-------------+-----+---+--------+-----+----+-----+
3461 *
3462 * Rt: source or destination register
3463 * Rn: base register
3464 * imm9: unscaled immediate offset
3465 * opc: 00: STLUR*, 01/10/11: various LDAPUR*
3466 * size: size of load/store
3467 */
3468 static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
3469 {
3470 int rt = extract32(insn, 0, 5);
3471 int rn = extract32(insn, 5, 5);
3472 int offset = sextract32(insn, 12, 9);
3473 int opc = extract32(insn, 22, 2);
3474 int size = extract32(insn, 30, 2);
3475 TCGv_i64 clean_addr, dirty_addr;
3476 bool is_store = false;
3477 bool extend = false;
3478 bool iss_sf;
3479 MemOp mop;
3480
3481 if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
3482 unallocated_encoding(s);
3483 return;
3484 }
3485
3486 /* TODO: ARMv8.4-LSE SCTLR.nAA */
3487 mop = size | MO_ALIGN;
3488
3489 switch (opc) {
3490 case 0: /* STLURB */
3491 is_store = true;
3492 break;
3493 case 1: /* LDAPUR* */
3494 break;
3495 case 2: /* LDAPURS* 64-bit variant */
3496 if (size == 3) {
3497 unallocated_encoding(s);
3498 return;
3499 }
3500 mop |= MO_SIGN;
3501 break;
3502 case 3: /* LDAPURS* 32-bit variant */
3503 if (size > 1) {
3504 unallocated_encoding(s);
3505 return;
3506 }
3507 mop |= MO_SIGN;
3508 extend = true; /* zero-extend 32->64 after signed load */
3509 break;
3510 default:
3511 g_assert_not_reached();
3512 }
3513
3514 iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);
3515
3516 if (rn == 31) {
3517 gen_check_sp_alignment(s);
3518 }
3519
3520 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3521 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3522 clean_addr = clean_data_tbi(s, dirty_addr);
3523
3524 if (is_store) {
3525 /* Store-Release semantics */
3526 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
3527 do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
3528 } else {
3529 /*
3530 * Load-AcquirePC semantics; we implement as the slightly more
3531 * restrictive Load-Acquire.
3532 */
3533 do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
3534 extend, true, rt, iss_sf, true);
3535 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3536 }
3537 }
3538
3539 /* Load/store register (all forms) */
3540 static void disas_ldst_reg(DisasContext *s, uint32_t insn)
3541 {
3542 int rt = extract32(insn, 0, 5);
3543 int opc = extract32(insn, 22, 2);
3544 bool is_vector = extract32(insn, 26, 1);
3545 int size = extract32(insn, 30, 2);
3546
3547 switch (extract32(insn, 24, 2)) {
3548 case 0:
3549 if (extract32(insn, 21, 1) == 0) {
3550 /* Load/store register (unscaled immediate)
3551 * Load/store immediate pre/post-indexed
3552 * Load/store register unprivileged
3553 */
3554 disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
3555 return;
3556 }
3557 switch (extract32(insn, 10, 2)) {
3558 case 0:
3559 disas_ldst_atomic(s, insn, size, rt, is_vector);
3560 return;
3561 case 2:
3562 disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
3563 return;
3564 default:
3565 disas_ldst_pac(s, insn, size, rt, is_vector);
3566 return;
3567 }
3568 break;
3569 case 1:
3570 disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
3571 return;
3572 }
3573 unallocated_encoding(s);
3574 }
3575
3576 /* AdvSIMD load/store multiple structures
3577 *
3578 * 31 30 29 23 22 21 16 15 12 11 10 9 5 4 0
3579 * +---+---+---------------+---+-------------+--------+------+------+------+
3580 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size | Rn | Rt |
3581 * +---+---+---------------+---+-------------+--------+------+------+------+
3582 *
3583 * AdvSIMD load/store multiple structures (post-indexed)
3584 *
3585 * 31 30 29 23 22 21 20 16 15 12 11 10 9 5 4 0
3586 * +---+---+---------------+---+---+---------+--------+------+------+------+
3587 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 | Rm | opcode | size | Rn | Rt |
3588 * +---+---+---------------+---+---+---------+--------+------+------+------+
3589 *
3590 * Rt: first (or only) SIMD&FP register to be transferred
3591 * Rn: base address or SP
3592 * Rm (post-index only): post-index register (when !31) or size dependent #imm
3593 */
3594 static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
3595 {
3596 int rt = extract32(insn, 0, 5);
3597 int rn = extract32(insn, 5, 5);
3598 int rm = extract32(insn, 16, 5);
3599 int size = extract32(insn, 10, 2);
3600 int opcode = extract32(insn, 12, 4);
3601 bool is_store = !extract32(insn, 22, 1);
3602 bool is_postidx = extract32(insn, 23, 1);
3603 bool is_q = extract32(insn, 30, 1);
3604 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3605 MemOp endian, align, mop;
3606
3607 int total; /* total bytes */
3608 int elements; /* elements per vector */
3609 int rpt; /* num iterations */
3610 int selem; /* structure elements */
3611 int r;
3612
3613 if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
3614 unallocated_encoding(s);
3615 return;
3616 }
3617
3618 if (!is_postidx && rm != 0) {
3619 unallocated_encoding(s);
3620 return;
3621 }
3622
3623 /* From the shared decode logic */
3624 switch (opcode) {
3625 case 0x0:
3626 rpt = 1;
3627 selem = 4;
3628 break;
3629 case 0x2:
3630 rpt = 4;
3631 selem = 1;
3632 break;
3633 case 0x4:
3634 rpt = 1;
3635 selem = 3;
3636 break;
3637 case 0x6:
3638 rpt = 3;
3639 selem = 1;
3640 break;
3641 case 0x7:
3642 rpt = 1;
3643 selem = 1;
3644 break;
3645 case 0x8:
3646 rpt = 1;
3647 selem = 2;
3648 break;
3649 case 0xa:
3650 rpt = 2;
3651 selem = 1;
3652 break;
3653 default:
3654 unallocated_encoding(s);
3655 return;
3656 }
3657
3658 if (size == 3 && !is_q && selem != 1) {
3659 /* reserved */
3660 unallocated_encoding(s);
3661 return;
3662 }
3663
3664 if (!fp_access_check(s)) {
3665 return;
3666 }
3667
3668 if (rn == 31) {
3669 gen_check_sp_alignment(s);
3670 }
3671
3672 /* For our purposes, bytes are always little-endian. */
3673 endian = s->be_data;
3674 if (size == 0) {
3675 endian = MO_LE;
3676 }
3677
3678 total = rpt * selem * (is_q ? 16 : 8);
3679 tcg_rn = cpu_reg_sp(s, rn);
3680
3681 /*
3682 * Issue the MTE check vs the logical repeat count, before we
3683 * promote consecutive little-endian elements below.
3684 */
3685 clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
3686 total);
3687
3688 /*
3689 * Consecutive little-endian elements from a single register
3690 * can be promoted to a larger little-endian operation.
3691 */
3692 align = MO_ALIGN;
3693 if (selem == 1 && endian == MO_LE) {
3694 align = pow2_align(size);
3695 size = 3;
3696 }
3697 if (!s->align_mem) {
3698 align = 0;
3699 }
3700 mop = endian | size | align;
3701
3702 elements = (is_q ? 16 : 8) >> size;
3703 tcg_ebytes = tcg_constant_i64(1 << size);
3704 for (r = 0; r < rpt; r++) {
3705 int e;
3706 for (e = 0; e < elements; e++) {
3707 int xs;
3708 for (xs = 0; xs < selem; xs++) {
3709 int tt = (rt + r + xs) % 32;
3710 if (is_store) {
3711 do_vec_st(s, tt, e, clean_addr, mop);
3712 } else {
3713 do_vec_ld(s, tt, e, clean_addr, mop);
3714 }
3715 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3716 }
3717 }
3718 }
3719
3720 if (!is_store) {
3721 /* For non-quad operations, setting a slice of the low
3722 * 64 bits of the register clears the high 64 bits (in
3723 * the ARM ARM pseudocode this is implicit in the fact
3724 * that 'rval' is a 64 bit wide variable).
3725 * For quad operations, we might still need to zero the
3726 * high bits of the SVE register.
3727 */
3728 for (r = 0; r < rpt * selem; r++) {
3729 int tt = (rt + r) % 32;
3730 clear_vec_high(s, is_q, tt);
3731 }
3732 }
3733
3734 if (is_postidx) {
3735 if (rm == 31) {
3736 tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3737 } else {
3738 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
3739 }
3740 }
3741 }
3742
3743 /* AdvSIMD load/store single structure
3744 *
3745 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
3746 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3747 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt |
3748 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3749 *
3750 * AdvSIMD load/store single structure (post-indexed)
3751 *
3752 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
3753 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3754 * | 0 | Q | 0 0 1 1 0 1 1 | L R | Rm | opc | S | size | Rn | Rt |
3755 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3756 *
3757 * Rt: first (or only) SIMD&FP register to be transferred
3758 * Rn: base address or SP
3759 * Rm (post-index only): post-index register (when !31) or size dependent #imm
3760 * index = encoded in Q:S:size dependent on size
3761 *
3762 * lane_size = encoded in R, opc
3763 * transfer width = encoded in opc, S, size
3764 */
3765 static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
3766 {
3767 int rt = extract32(insn, 0, 5);
3768 int rn = extract32(insn, 5, 5);
3769 int rm = extract32(insn, 16, 5);
3770 int size = extract32(insn, 10, 2);
3771 int S = extract32(insn, 12, 1);
3772 int opc = extract32(insn, 13, 3);
3773 int R = extract32(insn, 21, 1);
3774 int is_load = extract32(insn, 22, 1);
3775 int is_postidx = extract32(insn, 23, 1);
3776 int is_q = extract32(insn, 30, 1);
3777
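/*
 * scale is the log2 element size from opc<2:1>; selem counts structure
 * elements from opc<0>:R; the lane index is packed as Q:S:size and
 * narrowed in the switch below according to the element size.
 */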
3778 int scale = extract32(opc, 1, 2);
3779 int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
3780 bool replicate = false;
3781 int index = is_q << 3 | S << 2 | size;
3782 int xs, total;
3783 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3784 MemOp mop;
3785
3786 if (extract32(insn, 31, 1)) {
3787 unallocated_encoding(s);
3788 return;
3789 }
3790 if (!is_postidx && rm != 0) {
3791 unallocated_encoding(s);
3792 return;
3793 }
3794
3795 switch (scale) {
3796 case 3:
3797 if (!is_load || S) {
3798 unallocated_encoding(s);
3799 return;
3800 }
3801 scale = size;
3802 replicate = true;
3803 break;
3804 case 0:
3805 break;
3806 case 1:
3807 if (extract32(size, 0, 1)) {
3808 unallocated_encoding(s);
3809 return;
3810 }
3811 index >>= 1;
3812 break;
3813 case 2:
3814 if (extract32(size, 1, 1)) {
3815 unallocated_encoding(s);
3816 return;
3817 }
3818 if (!extract32(size, 0, 1)) {
3819 index >>= 2;
3820 } else {
3821 if (S) {
3822 unallocated_encoding(s);
3823 return;
3824 }
3825 index >>= 3;
3826 scale = 3;
3827 }
3828 break;
3829 default:
3830 g_assert_not_reached();
3831 }
3832
3833 if (!fp_access_check(s)) {
3834 return;
3835 }
3836
3837 if (rn == 31) {
3838 gen_check_sp_alignment(s);
3839 }
3840
3841 total = selem << scale;
3842 tcg_rn = cpu_reg_sp(s, rn);
3843
3844 clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
3845 total);
3846 mop = finalize_memop(s, scale);
3847
3848 tcg_ebytes = tcg_constant_i64(1 << scale);
3849 for (xs = 0; xs < selem; xs++) {
3850 if (replicate) {
3851 /* Load and replicate to all elements */
3852 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
3853
3854 tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
3855 tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
3856 (is_q + 1) * 8, vec_full_reg_size(s),
3857 tcg_tmp);
3858 } else {
3859 /* Load/store one element per register */
3860 if (is_load) {
3861 do_vec_ld(s, rt, index, clean_addr, mop);
3862 } else {
3863 do_vec_st(s, rt, index, clean_addr, mop);
3864 }
3865 }
3866 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3867 rt = (rt + 1) % 32;
3868 }
3869
3870 if (is_postidx) {
3871 if (rm == 31) {
3872 tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3873 } else {
3874 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
3875 }
3876 }
3877 }
3878
3879 /*
3880 * Load/Store memory tags
3881 *
3882 * 31 30 29 24 22 21 12 10 5 0
3883 * +-----+-------------+-----+---+------+-----+------+------+
3884 * | 1 1 | 0 1 1 0 0 1 | op1 | 1 | imm9 | op2 | Rn | Rt |
3885 * +-----+-------------+-----+---+------+-----+------+------+
3886 */
3887 static void disas_ldst_tag(DisasContext *s, uint32_t insn)
3888 {
3889 int rt = extract32(insn, 0, 5);
3890 int rn = extract32(insn, 5, 5);
3891 uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
3892 int op2 = extract32(insn, 10, 2);
3893 int op1 = extract32(insn, 22, 2);
3894 bool is_load = false, is_pair = false, is_zero = false, is_mult = false;
3895 int index = 0;
3896 TCGv_i64 addr, clean_addr, tcg_rt;
3897
3898 /* We checked insn bits [29:24,21] in the caller. */
3899 if (extract32(insn, 30, 2) != 3) {
3900 goto do_unallocated;
3901 }
3902
3903 /*
3904 * @index is a tri-state variable which has 3 states:
3905 * < 0 : post-index, writeback
3906 * = 0 : signed offset
3907 * > 0 : pre-index, writeback
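 * (Below, index = op2 - 2 maps op2 = 1, 2, 3 to -1, 0, +1 respectively.)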
3908 */
3909 switch (op1) {
3910 case 0:
3911 if (op2 != 0) {
3912 /* STG */
3913 index = op2 - 2;
3914 } else {
3915 /* STZGM */
3916 if (s->current_el == 0 || offset != 0) {
3917 goto do_unallocated;
3918 }
3919 is_mult = is_zero = true;
3920 }
3921 break;
3922 case 1:
3923 if (op2 != 0) {
3924 /* STZG */
3925 is_zero = true;
3926 index = op2 - 2;
3927 } else {
3928 /* LDG */
3929 is_load = true;
3930 }
3931 break;
3932 case 2:
3933 if (op2 != 0) {
3934 /* ST2G */
3935 is_pair = true;
3936 index = op2 - 2;
3937 } else {
3938 /* STGM */
3939 if (s->current_el == 0 || offset != 0) {
3940 goto do_unallocated;
3941 }
3942 is_mult = true;
3943 }
3944 break;
3945 case 3:
3946 if (op2 != 0) {
3947 /* STZ2G */
3948 is_pair = is_zero = true;
3949 index = op2 - 2;
3950 } else {
3951 /* LDGM */
3952 if (s->current_el == 0 || offset != 0) {
3953 goto do_unallocated;
3954 }
3955 is_mult = is_load = true;
3956 }
3957 break;
3958
3959 default:
3960 do_unallocated:
3961 unallocated_encoding(s);
3962 return;
3963 }
3964
3965 if (is_mult
3966 ? !dc_isar_feature(aa64_mte, s)
3967 : !dc_isar_feature(aa64_mte_insn_reg, s)) {
3968 goto do_unallocated;
3969 }
3970
3971 if (rn == 31) {
3972 gen_check_sp_alignment(s);
3973 }
3974
3975 addr = read_cpu_reg_sp(s, rn, true);
3976 if (index >= 0) {
3977 /* pre-index or signed offset */
3978 tcg_gen_addi_i64(addr, addr, offset);
3979 }
3980
3981 if (is_mult) {
3982 tcg_rt = cpu_reg(s, rt);
3983
3984 if (is_zero) {
3985 int size = 4 << s->dcz_blocksize;
3986
3987 if (s->ata) {
3988 gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
3989 }
3990 /*
3991 * The non-tags portion of STZGM is mostly like DC_ZVA,
3992 * except the alignment happens before the access.
3993 */
3994 clean_addr = clean_data_tbi(s, addr);
3995 tcg_gen_andi_i64(clean_addr, clean_addr, -size);
3996 gen_helper_dc_zva(cpu_env, clean_addr);
3997 } else if (s->ata) {
3998 if (is_load) {
3999 gen_helper_ldgm(tcg_rt, cpu_env, addr);
4000 } else {
4001 gen_helper_stgm(cpu_env, addr, tcg_rt);
4002 }
4003 } else {
4004 MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
4005 int size = 4 << GMID_EL1_BS;
4006
4007 clean_addr = clean_data_tbi(s, addr);
4008 tcg_gen_andi_i64(clean_addr, clean_addr, -size);
4009 gen_probe_access(s, clean_addr, acc, size);
4010
4011 if (is_load) {
4012 /* The result tags are zeros. */
4013 tcg_gen_movi_i64(tcg_rt, 0);
4014 }
4015 }
4016 return;
4017 }
4018
4019 if (is_load) {
4020 tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
4021 tcg_rt = cpu_reg(s, rt);
4022 if (s->ata) {
4023 gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
4024 } else {
4025 clean_addr = clean_data_tbi(s, addr);
4026 gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
4027 gen_address_with_allocation_tag0(tcg_rt, addr);
4028 }
4029 } else {
4030 tcg_rt = cpu_reg_sp(s, rt);
4031 if (!s->ata) {
4032 /*
4033 * For STG and ST2G, we need to check alignment and probe memory.
4034 * TODO: For STZG and STZ2G, we could rely on the stores below,
4035 * at least for system mode; user-only won't enforce alignment.
4036 */
4037 if (is_pair) {
4038 gen_helper_st2g_stub(cpu_env, addr);
4039 } else {
4040 gen_helper_stg_stub(cpu_env, addr);
4041 }
4042 } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
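/* Other CPUs may run concurrently: use helpers that update tag memory atomically. */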
4043 if (is_pair) {
4044 gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
4045 } else {
4046 gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
4047 }
4048 } else {
4049 if (is_pair) {
4050 gen_helper_st2g(cpu_env, addr, tcg_rt);
4051 } else {
4052 gen_helper_stg(cpu_env, addr, tcg_rt);
4053 }
4054 }
4055 }
4056
4057 if (is_zero) {
4058 TCGv_i64 clean_addr = clean_data_tbi(s, addr);
4059 TCGv_i64 zero64 = tcg_constant_i64(0);
4060 TCGv_i128 zero128 = tcg_temp_new_i128();
4061 int mem_index = get_mem_index(s);
4062 MemOp mop = finalize_memop(s, MO_128 | MO_ALIGN);
4063
4064 tcg_gen_concat_i64_i128(zero128, zero64, zero64);
4065
4066 /* This is 1 or 2 atomic 16-byte operations. */
4067 tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
4068 if (is_pair) {
4069 tcg_gen_addi_i64(clean_addr, clean_addr, 16);
4070 tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
4071 }
4072 }
4073
4074 if (index != 0) {
4075 /* pre-index or post-index */
4076 if (index < 0) {
4077 /* post-index */
4078 tcg_gen_addi_i64(addr, addr, offset);
4079 }
4080 tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
4081 }
4082 }
4083
4084 /* Loads and stores */
4085 static void disas_ldst(DisasContext *s, uint32_t insn)
4086 {
4087 switch (extract32(insn, 24, 6)) {
4088 case 0x08: /* Load/store exclusive */
4089 disas_ldst_excl(s, insn);
4090 break;
4091 case 0x18: case 0x1c: /* Load register (literal) */
4092 disas_ld_lit(s, insn);
4093 break;
4094 case 0x28: case 0x29:
4095 case 0x2c: case 0x2d: /* Load/store pair (all forms) */
4096 disas_ldst_pair(s, insn);
4097 break;
4098 case 0x38: case 0x39:
4099 case 0x3c: case 0x3d: /* Load/store register (all forms) */
4100 disas_ldst_reg(s, insn);
4101 break;
4102 case 0x0c: /* AdvSIMD load/store multiple structures */
4103 disas_ldst_multiple_struct(s, insn);
4104 break;
4105 case 0x0d: /* AdvSIMD load/store single structure */
4106 disas_ldst_single_struct(s, insn);
4107 break;
4108 case 0x19:
4109 if (extract32(insn, 21, 1) != 0) {
4110 disas_ldst_tag(s, insn);
4111 } else if (extract32(insn, 10, 2) == 0) {
4112 disas_ldst_ldapr_stlr(s, insn);
4113 } else {
4114 unallocated_encoding(s);
4115 }
4116 break;
4117 default:
4118 unallocated_encoding(s);
4119 break;
4120 }
4121 }
4122
4123 typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);
4124
4125 static bool gen_rri(DisasContext *s, arg_rri_sf *a,
4126 bool rd_sp, bool rn_sp, ArithTwoOp *fn)
4127 {
4128 TCGv_i64 tcg_rn = rn_sp ? cpu_reg_sp(s, a->rn) : cpu_reg(s, a->rn);
4129 TCGv_i64 tcg_rd = rd_sp ? cpu_reg_sp(s, a->rd) : cpu_reg(s, a->rd);
4130 TCGv_i64 tcg_imm = tcg_constant_i64(a->imm);
4131
4132 fn(tcg_rd, tcg_rn, tcg_imm);
4133 if (!a->sf) {
4134 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4135 }
4136 return true;
4137 }
4138
4139 /*
4140 * PC-rel. addressing
4141 */
4142
4143 static bool trans_ADR(DisasContext *s, arg_ri *a)
4144 {
4145 gen_pc_plus_diff(s, cpu_reg(s, a->rd), a->imm);
4146 return true;
4147 }
4148
4149 static bool trans_ADRP(DisasContext *s, arg_ri *a)
4150 {
4151 int64_t offset = (int64_t)a->imm << 12;
4152
4153 /* The page offset is ok for CF_PCREL. */
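/* ADRP yields (PC & ~0xfff) + (imm << 12); removing pc_curr's low 12 bits from the diff achieves that page alignment. */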
4154 offset -= s->pc_curr & 0xfff;
4155 gen_pc_plus_diff(s, cpu_reg(s, a->rd), offset);
4156 return true;
4157 }
4158
4159 /*
4160 * Add/subtract (immediate)
4161 */
4162 TRANS(ADD_i, gen_rri, a, 1, 1, tcg_gen_add_i64)
4163 TRANS(SUB_i, gen_rri, a, 1, 1, tcg_gen_sub_i64)
4164 TRANS(ADDS_i, gen_rri, a, 0, 1, a->sf ? gen_add64_CC : gen_add32_CC)
4165 TRANS(SUBS_i, gen_rri, a, 0, 1, a->sf ? gen_sub64_CC : gen_sub32_CC)
4166
4167 /*
4168 * Add/subtract (immediate, with tags)
4169 */
4170
4171 static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a,
4172 bool sub_op)
4173 {
4174 TCGv_i64 tcg_rn, tcg_rd;
4175 int imm;
4176
4177 imm = a->uimm6 << LOG2_TAG_GRANULE;
4178 if (sub_op) {
4179 imm = -imm;
4180 }
4181
4182 tcg_rn = cpu_reg_sp(s, a->rn);
4183 tcg_rd = cpu_reg_sp(s, a->rd);
4184
4185 if (s->ata) {
4186 gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn,
4187 tcg_constant_i32(imm),
4188 tcg_constant_i32(a->uimm4));
4189 } else {
4190 tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
4191 gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
4192 }
4193 return true;
4194 }
4195
4196 TRANS_FEAT(ADDG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, false)
4197 TRANS_FEAT(SUBG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, true)
4198
4199 /* The input should be a value in the bottom e bits (with higher
4200 * bits zero); returns that value replicated into every element
4201 * of size e in a 64 bit integer.
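 * For example, bitfield_replicate(0x3, 4) == 0x3333333333333333.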
4202 */
4203 static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
4204 {
4205 assert(e != 0);
4206 while (e < 64) {
4207 mask |= mask << e;
4208 e *= 2;
4209 }
4210 return mask;
4211 }
4212
4213 /*
4214 * Logical (immediate)
4215 */
4216
4217 /*
4218 * Simplified variant of pseudocode DecodeBitMasks() for the case where we
4219 * only require the wmask. Returns false if the imms/immr/immn are a reserved
4220 * value (ie should cause a guest UNDEF exception), and true if they are
4221 * valid, in which case the decoded bit pattern is written to result.
4222 */
4223 bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
4224 unsigned int imms, unsigned int immr)
4225 {
4226 uint64_t mask;
4227 unsigned e, levels, s, r;
4228 int len;
4229
4230 assert(immn < 2 && imms < 64 && immr < 64);
4231
4232 /* The bit patterns we create here are 64 bit patterns which
4233 * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
4234 * 64 bits each. Each element contains the same value: a run
4235 * of between 1 and e-1 non-zero bits, rotated within the
4236 * element by between 0 and e-1 bits.
4237 *
4238 * The element size and run length are encoded into immn (1 bit)
4239 * and imms (6 bits) as follows:
4240 * 64 bit elements: immn = 1, imms = <length of run - 1>
4241 * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
4242 * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
4243 * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
4244 * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
4245 * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
4246 * Notice that immn = 0, imms = 11111x is the only combination
4247 * not covered by one of the above options; this is reserved.
4248 * Further, <length of run - 1> all-ones is a reserved pattern.
4249 *
4250 * In all cases the rotation is by immr % e (and immr is 6 bits).
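 *
 * Worked example: immn = 0, imms = 0b111100, immr = 0 selects 2-bit
 * elements (e = 2) with a run of one set bit and no rotation, giving
 * the pattern 0x5555555555555555.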
4251 */
4252
4253 /* First determine the element size */
4254 len = 31 - clz32((immn << 6) | (~imms & 0x3f));
4255 if (len < 1) {
4256 /* This is the immn == 0, imms == 11111x case */
4257 return false;
4258 }
4259 e = 1 << len;
4260
4261 levels = e - 1;
4262 s = imms & levels;
4263 r = immr & levels;
4264
4265 if (s == levels) {
4266 /* <length of run - 1> mustn't be all-ones. */
4267 return false;
4268 }
4269
4270 /* Create the value of one element: s+1 set bits rotated
4271 * by r within the element (which is e bits wide)...
4272 */
4273 mask = MAKE_64BIT_MASK(0, s + 1);
4274 if (r) {
4275 mask = (mask >> r) | (mask << (e - r));
4276 mask &= MAKE_64BIT_MASK(0, e);
4277 }
4278 /* ...then replicate the element over the whole 64 bit value */
4279 mask = bitfield_replicate(mask, e);
4280 *result = mask;
4281 return true;
4282 }
4283
4284 static bool gen_rri_log(DisasContext *s, arg_rri_log *a, bool set_cc,
4285 void (*fn)(TCGv_i64, TCGv_i64, int64_t))
4286 {
4287 TCGv_i64 tcg_rd, tcg_rn;
4288 uint64_t imm;
4289
4290 /* Some immediate field values are reserved. */
4291 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
4292 extract32(a->dbm, 0, 6),
4293 extract32(a->dbm, 6, 6))) {
4294 return false;
4295 }
4296 if (!a->sf) {
4297 imm &= 0xffffffffull;
4298 }
4299
4300 tcg_rd = set_cc ? cpu_reg(s, a->rd) : cpu_reg_sp(s, a->rd);
4301 tcg_rn = cpu_reg(s, a->rn);
4302
4303 fn(tcg_rd, tcg_rn, imm);
4304 if (set_cc) {
4305 gen_logic_CC(a->sf, tcg_rd);
4306 }
4307 if (!a->sf) {
4308 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4309 }
4310 return true;
4311 }
4312
4313 TRANS(AND_i, gen_rri_log, a, false, tcg_gen_andi_i64)
4314 TRANS(ORR_i, gen_rri_log, a, false, tcg_gen_ori_i64)
4315 TRANS(EOR_i, gen_rri_log, a, false, tcg_gen_xori_i64)
4316 TRANS(ANDS_i, gen_rri_log, a, true, tcg_gen_andi_i64)
4317
4318 /*
4319 * Move wide (immediate)
4320 */
4321
4322 static bool trans_MOVZ(DisasContext *s, arg_movw *a)
4323 {
4324 int pos = a->hw << 4;
4325 tcg_gen_movi_i64(cpu_reg(s, a->rd), (uint64_t)a->imm << pos);
4326 return true;
4327 }
4328
4329 static bool trans_MOVN(DisasContext *s, arg_movw *a)
4330 {
4331 int pos = a->hw << 4;
4332 uint64_t imm = a->imm;
4333
4334 imm = ~(imm << pos);
4335 if (!a->sf) {
4336 imm = (uint32_t)imm;
4337 }
4338 tcg_gen_movi_i64(cpu_reg(s, a->rd), imm);
4339 return true;
4340 }
4341
4342 static bool trans_MOVK(DisasContext *s, arg_movw *a)
4343 {
4344 int pos = a->hw << 4;
4345 TCGv_i64 tcg_rd, tcg_im;
4346
4347 tcg_rd = cpu_reg(s, a->rd);
4348 tcg_im = tcg_constant_i64(a->imm);
4349 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_im, pos, 16);
4350 if (!a->sf) {
4351 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4352 }
4353 return true;
4354 }
4355
4356 /*
4357 * Bitfield
4358 */
4359
4360 static bool trans_SBFM(DisasContext *s, arg_SBFM *a)
4361 {
4362 TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
4363 TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
4364 unsigned int bitsize = a->sf ? 64 : 32;
4365 unsigned int ri = a->immr;
4366 unsigned int si = a->imms;
4367 unsigned int pos, len;
4368
4369 if (si >= ri) {
4370 /* Wd<s-r:0> = Wn<s:r> */
4371 len = (si - ri) + 1;
4372 tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
4373 if (!a->sf) {
4374 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4375 }
4376 } else {
4377 /* Wd<32+s-r,32-r> = Wn<s:0> */
4378 len = si + 1;
4379 pos = (bitsize - ri) & (bitsize - 1);
4380
4381 if (len < ri) {
4382 /*
4383 * Sign extend the destination field from len to fill the
4384 * balance of the word. Let the deposit below insert all
4385 * of those sign bits.
4386 */
4387 tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
4388 len = ri;
4389 }
4390
4391 /*
4392 * We start with zero, and we haven't modified any bits outside
4393 * bitsize, therefore no final zero-extension is needed for !sf.
4394 */
4395 tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
4396 }
4397 return true;
4398 }
4399
4400 static bool trans_UBFM(DisasContext *s, arg_UBFM *a)
4401 {
4402 TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
4403 TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
4404 unsigned int bitsize = a->sf ? 64 : 32;
4405 unsigned int ri = a->immr;
4406 unsigned int si = a->imms;
4407 unsigned int pos, len;
4408
4412 if (si >= ri) {
4413 /* Wd<s-r:0> = Wn<s:r> */
4414 len = (si - ri) + 1;
4415 tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
4416 } else {
4417 /* Wd<32+s-r,32-r> = Wn<s:0> */
4418 len = si + 1;
4419 pos = (bitsize - ri) & (bitsize - 1);
4420 tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
4421 }
4422 return true;
4423 }
4424
4425 static bool trans_BFM(DisasContext *s, arg_BFM *a)
4426 {
4427 TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
4428 TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
4429 unsigned int bitsize = a->sf ? 64 : 32;
4430 unsigned int ri = a->immr;
4431 unsigned int si = a->imms;
4432 unsigned int pos, len;
4433
4437 if (si >= ri) {
4438 /* Wd<s-r:0> = Wn<s:r> */
4439 tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
4440 len = (si - ri) + 1;
4441 pos = 0;
4442 } else {
4443 /* Wd<32+s-r,32-r> = Wn<s:0> */
4444 len = si + 1;
4445 pos = (bitsize - ri) & (bitsize - 1);
4446 }
4447
4448 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
4449 if (!a->sf) {
4450 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4451 }
4452 return true;
4453 }
4454
4455 static bool trans_EXTR(DisasContext *s, arg_extract *a)
4456 {
4457 TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
4458
4459 tcg_rd = cpu_reg(s, a->rd);
4460
4461 if (unlikely(a->imm == 0)) {
4462 /*
4463 * tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
4464 * so an extract from bit 0 is a special case.
4465 */
4466 if (a->sf) {
4467 tcg_gen_mov_i64(tcg_rd, cpu_reg(s, a->rm));
4468 } else {
4469 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, a->rm));
4470 }
4471 } else {
4472 tcg_rm = cpu_reg(s, a->rm);
4473 tcg_rn = cpu_reg(s, a->rn);
4474
4475 if (a->sf) {
4476 /* Specialization to ROR happens in EXTRACT2. */
4477 tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, a->imm);
4478 } else {
4479 TCGv_i32 t0 = tcg_temp_new_i32();
4480
4481 tcg_gen_extrl_i64_i32(t0, tcg_rm);
4482 if (a->rm == a->rn) {
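/* EXTR with Rm == Rn is the ROR (immediate) alias. */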
4483 tcg_gen_rotri_i32(t0, t0, a->imm);
4484 } else {
4485 TCGv_i32 t1 = tcg_temp_new_i32();
4486 tcg_gen_extrl_i64_i32(t1, tcg_rn);
4487 tcg_gen_extract2_i32(t0, t0, t1, a->imm);
4488 }
4489 tcg_gen_extu_i32_i64(tcg_rd, t0);
4490 }
4491 }
4492 return true;
4493 }
4494
4495 /* Shift a TCGv src by TCGv shift_amount, put result in dst.
4496 * Note that it is the caller's responsibility to ensure that the
4497 * shift amount is in range (i.e. 0..31 or 0..63) and to provide the
4498 * ARM-mandated semantics for out-of-range shifts.
4499 */
4500 static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
4501 enum a64_shift_type shift_type, TCGv_i64 shift_amount)
4502 {
4503 switch (shift_type) {
4504 case A64_SHIFT_TYPE_LSL:
4505 tcg_gen_shl_i64(dst, src, shift_amount);
4506 break;
4507 case A64_SHIFT_TYPE_LSR:
4508 tcg_gen_shr_i64(dst, src, shift_amount);
4509 break;
4510 case A64_SHIFT_TYPE_ASR:
4511 if (!sf) {
4512 tcg_gen_ext32s_i64(dst, src);
4513 }
4514 tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
4515 break;
4516 case A64_SHIFT_TYPE_ROR:
4517 if (sf) {
4518 tcg_gen_rotr_i64(dst, src, shift_amount);
4519 } else {
4520 TCGv_i32 t0, t1;
4521 t0 = tcg_temp_new_i32();
4522 t1 = tcg_temp_new_i32();
4523 tcg_gen_extrl_i64_i32(t0, src);
4524 tcg_gen_extrl_i64_i32(t1, shift_amount);
4525 tcg_gen_rotr_i32(t0, t0, t1);
4526 tcg_gen_extu_i32_i64(dst, t0);
4527 }
4528 break;
4529 default:
4530 assert(FALSE); /* all shift types should be handled */
4531 break;
4532 }
4533
4534 if (!sf) { /* zero extend final result */
4535 tcg_gen_ext32u_i64(dst, dst);
4536 }
4537 }
4538
4539 /* Shift a TCGv src by immediate, put result in dst.
4540 * The shift amount must be in range (this should always be true as the
4541 * relevant instructions will UNDEF on bad shift immediates).
4542 */
4543 static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
4544 enum a64_shift_type shift_type, unsigned int shift_i)
4545 {
4546 assert(shift_i < (sf ? 64 : 32));
4547
4548 if (shift_i == 0) {
4549 tcg_gen_mov_i64(dst, src);
4550 } else {
4551 shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
4552 }
4553 }
4554
4555 /* Logical (shifted register)
4556 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
4557 * +----+-----+-----------+-------+---+------+--------+------+------+
4558 * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
4559 * +----+-----+-----------+-------+---+------+--------+------+------+
4560 */
4561 static void disas_logic_reg(DisasContext *s, uint32_t insn)
4562 {
4563 TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
4564 unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
4565
4566 sf = extract32(insn, 31, 1);
4567 opc = extract32(insn, 29, 2);
4568 shift_type = extract32(insn, 22, 2);
4569 invert = extract32(insn, 21, 1);
4570 rm = extract32(insn, 16, 5);
4571 shift_amount = extract32(insn, 10, 6);
4572 rn = extract32(insn, 5, 5);
4573 rd = extract32(insn, 0, 5);
4574
4575 if (!sf && (shift_amount & (1 << 5))) {
4576 unallocated_encoding(s);
4577 return;
4578 }
4579
4580 tcg_rd = cpu_reg(s, rd);
4581
4582 if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
4583 /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
4584 * register-register MOV and MVN, so it is worth special casing.
4585 */
4586 tcg_rm = cpu_reg(s, rm);
4587 if (invert) {
4588 tcg_gen_not_i64(tcg_rd, tcg_rm);
4589 if (!sf) {
4590 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4591 }
4592 } else {
4593 if (sf) {
4594 tcg_gen_mov_i64(tcg_rd, tcg_rm);
4595 } else {
4596 tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
4597 }
4598 }
4599 return;
4600 }
4601
4602 tcg_rm = read_cpu_reg(s, rm, sf);
4603
4604 if (shift_amount) {
4605 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
4606 }
4607
4608 tcg_rn = cpu_reg(s, rn);
4609
4610 switch (opc | (invert << 2)) {
4611 case 0: /* AND */
4612 case 3: /* ANDS */
4613 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
4614 break;
4615 case 1: /* ORR */
4616 tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
4617 break;
4618 case 2: /* EOR */
4619 tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
4620 break;
4621 case 4: /* BIC */
4622 case 7: /* BICS */
4623 tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
4624 break;
4625 case 5: /* ORN */
4626 tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
4627 break;
4628 case 6: /* EON */
4629 tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
4630 break;
4631 default:
4632 assert(FALSE);
4633 break;
4634 }
4635
4636 if (!sf) {
4637 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4638 }
4639
4640 if (opc == 3) {
4641 gen_logic_CC(sf, tcg_rd);
4642 }
4643 }
4644
4645 /*
4646 * Add/subtract (extended register)
4647 *
4648 * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
4649 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4650 * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
4651 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4652 *
4653 * sf: 0 -> 32bit, 1 -> 64bit
4654 * op: 0 -> add , 1 -> sub
4655 * S: 1 -> set flags
4656 * opt: 00
4657 * option: extension type (see DecodeRegExtend)
4658 * imm3: optional shift to Rm
4659 *
4660 * Rd = Rn + LSL(extend(Rm), amount)
4661 */
4662 static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
4663 {
4664 int rd = extract32(insn, 0, 5);
4665 int rn = extract32(insn, 5, 5);
4666 int imm3 = extract32(insn, 10, 3);
4667 int option = extract32(insn, 13, 3);
4668 int rm = extract32(insn, 16, 5);
4669 int opt = extract32(insn, 22, 2);
4670 bool setflags = extract32(insn, 29, 1);
4671 bool sub_op = extract32(insn, 30, 1);
4672 bool sf = extract32(insn, 31, 1);
4673
4674 TCGv_i64 tcg_rm, tcg_rn; /* temps */
4675 TCGv_i64 tcg_rd;
4676 TCGv_i64 tcg_result;
4677
4678 if (imm3 > 4 || opt != 0) {
4679 unallocated_encoding(s);
4680 return;
4681 }
4682
4683 /* non-flag setting ops may use SP */
4684 if (!setflags) {
4685 tcg_rd = cpu_reg_sp(s, rd);
4686 } else {
4687 tcg_rd = cpu_reg(s, rd);
4688 }
4689 tcg_rn = read_cpu_reg_sp(s, rn, sf);
4690
4691 tcg_rm = read_cpu_reg(s, rm, sf);
4692 ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
4693
4694 tcg_result = tcg_temp_new_i64();
4695
4696 if (!setflags) {
4697 if (sub_op) {
4698 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
4699 } else {
4700 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
4701 }
4702 } else {
4703 if (sub_op) {
4704 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
4705 } else {
4706 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
4707 }
4708 }
4709
4710 if (sf) {
4711 tcg_gen_mov_i64(tcg_rd, tcg_result);
4712 } else {
4713 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4714 }
4715 }
4716
4717 /*
4718 * Add/subtract (shifted register)
4719 *
4720 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
4721 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4722 * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
4723 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4724 *
4725 * sf: 0 -> 32bit, 1 -> 64bit
4726 * op: 0 -> add , 1 -> sub
4727 * S: 1 -> set flags
4728 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
4729 * imm6: Shift amount to apply to Rm before the add/sub
4730 */
4731 static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
4732 {
4733 int rd = extract32(insn, 0, 5);
4734 int rn = extract32(insn, 5, 5);
4735 int imm6 = extract32(insn, 10, 6);
4736 int rm = extract32(insn, 16, 5);
4737 int shift_type = extract32(insn, 22, 2);
4738 bool setflags = extract32(insn, 29, 1);
4739 bool sub_op = extract32(insn, 30, 1);
4740 bool sf = extract32(insn, 31, 1);
4741
4742 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4743 TCGv_i64 tcg_rn, tcg_rm;
4744 TCGv_i64 tcg_result;
4745
4746 if ((shift_type == 3) || (!sf && (imm6 > 31))) {
4747 unallocated_encoding(s);
4748 return;
4749 }
4750
4751 tcg_rn = read_cpu_reg(s, rn, sf);
4752 tcg_rm = read_cpu_reg(s, rm, sf);
4753
4754 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
4755
4756 tcg_result = tcg_temp_new_i64();
4757
4758 if (!setflags) {
4759 if (sub_op) {
4760 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
4761 } else {
4762 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
4763 }
4764 } else {
4765 if (sub_op) {
4766 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
4767 } else {
4768 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
4769 }
4770 }
4771
4772 if (sf) {
4773 tcg_gen_mov_i64(tcg_rd, tcg_result);
4774 } else {
4775 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4776 }
4777 }
4778
4779 /* Data-processing (3 source)
4780 *
4781 * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
4782 * +--+------+-----------+------+------+----+------+------+------+
4783 * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
4784 * +--+------+-----------+------+------+----+------+------+------+
4785 */
4786 static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
4787 {
4788 int rd = extract32(insn, 0, 5);
4789 int rn = extract32(insn, 5, 5);
4790 int ra = extract32(insn, 10, 5);
4791 int rm = extract32(insn, 16, 5);
4792 int op_id = (extract32(insn, 29, 3) << 4) |
4793 (extract32(insn, 21, 3) << 1) |
4794 extract32(insn, 15, 1);
4795 bool sf = extract32(insn, 31, 1);
4796 bool is_sub = extract32(op_id, 0, 1);
4797 bool is_high = extract32(op_id, 2, 1);
4798 bool is_signed = false;
4799 TCGv_i64 tcg_op1;
4800 TCGv_i64 tcg_op2;
4801 TCGv_i64 tcg_tmp;
4802
4803 /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
4804 switch (op_id) {
4805 case 0x42: /* SMADDL */
4806 case 0x43: /* SMSUBL */
4807 case 0x44: /* SMULH */
4808 is_signed = true;
4809 break;
4810 case 0x0: /* MADD (32bit) */
4811 case 0x1: /* MSUB (32bit) */
4812 case 0x40: /* MADD (64bit) */
4813 case 0x41: /* MSUB (64bit) */
4814 case 0x4a: /* UMADDL */
4815 case 0x4b: /* UMSUBL */
4816 case 0x4c: /* UMULH */
4817 break;
4818 default:
4819 unallocated_encoding(s);
4820 return;
4821 }
4822
4823 if (is_high) {
4824 TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
4825 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4826 TCGv_i64 tcg_rn = cpu_reg(s, rn);
4827 TCGv_i64 tcg_rm = cpu_reg(s, rm);
4828
4829 if (is_signed) {
4830 tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4831 } else {
4832 tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4833 }
4834 return;
4835 }
4836
4837 tcg_op1 = tcg_temp_new_i64();
4838 tcg_op2 = tcg_temp_new_i64();
4839 tcg_tmp = tcg_temp_new_i64();
4840
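/*
 * MADD/MSUB (32- and 64-bit) operate on the registers directly; the
 * widening multiplies (SMADDL/UMADDL etc.) extend their 32-bit sources
 * to 64 bits first.
 */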
4841 if (op_id < 0x42) {
4842 tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
4843 tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
4844 } else {
4845 if (is_signed) {
4846 tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
4847 tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
4848 } else {
4849 tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
4850 tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
4851 }
4852 }
4853
4854 if (ra == 31 && !is_sub) {
4855 /* Special-case MADD with rA == XZR; it is the standard MUL alias */
4856 tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
4857 } else {
4858 tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
4859 if (is_sub) {
4860 tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4861 } else {
4862 tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4863 }
4864 }
4865
4866 if (!sf) {
4867 tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
4868 }
4869 }
4870
4871 /* Add/subtract (with carry)
4872 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
4873 * +--+--+--+------------------------+------+-------------+------+-----+
4874 * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | 0 0 0 0 0 0 | Rn | Rd |
4875 * +--+--+--+------------------------+------+-------------+------+-----+
4876 */
4877
4878 static void disas_adc_sbc(DisasContext *s, uint32_t insn)
4879 {
4880 unsigned int sf, op, setflags, rm, rn, rd;
4881 TCGv_i64 tcg_y, tcg_rn, tcg_rd;
4882
4883 sf = extract32(insn, 31, 1);
4884 op = extract32(insn, 30, 1);
4885 setflags = extract32(insn, 29, 1);
4886 rm = extract32(insn, 16, 5);
4887 rn = extract32(insn, 5, 5);
4888 rd = extract32(insn, 0, 5);
4889
4890 tcg_rd = cpu_reg(s, rd);
4891 tcg_rn = cpu_reg(s, rn);
4892
4893 if (op) {
4894 tcg_y = tcg_temp_new_i64();
4895 tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
4896 } else {
4897 tcg_y = cpu_reg(s, rm);
4898 }
4899
4900 if (setflags) {
4901 gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
4902 } else {
4903 gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
4904 }
4905 }
4906
4907 /*
4908 * Rotate right into flags
4909 * 31 30 29 21 15 10 5 4 0
4910 * +--+--+--+-----------------+--------+-----------+------+--+------+
4911 * |sf|op| S| 1 1 0 1 0 0 0 0 | imm6 | 0 0 0 0 1 | Rn |o2| mask |
4912 * +--+--+--+-----------------+--------+-----------+------+--+------+
4913 */
4914 static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
4915 {
4916 int mask = extract32(insn, 0, 4);
4917 int o2 = extract32(insn, 4, 1);
4918 int rn = extract32(insn, 5, 5);
4919 int imm6 = extract32(insn, 15, 6);
4920 int sf_op_s = extract32(insn, 29, 3);
4921 TCGv_i64 tcg_rn;
4922 TCGv_i32 nzcv;
4923
4924 if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
4925 unallocated_encoding(s);
4926 return;
4927 }
4928
4929 tcg_rn = read_cpu_reg(s, rn, 1);
4930 tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);
4931
4932 nzcv = tcg_temp_new_i32();
4933 tcg_gen_extrl_i64_i32(nzcv, tcg_rn);
4934
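/*
 * QEMU keeps NF and VF in bit 31, CF in bit 0, and treats ZF == 0 as
 * "Z is set", hence the shifts and the inverted AND below.
 */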
4935 if (mask & 8) { /* N */
4936 tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
4937 }
4938 if (mask & 4) { /* Z */
4939 tcg_gen_not_i32(cpu_ZF, nzcv);
4940 tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
4941 }
4942 if (mask & 2) { /* C */
4943 tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
4944 }
4945 if (mask & 1) { /* V */
4946 tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
4947 }
4948 }
4949
4950 /*
4951 * Evaluate into flags
4952 * 31 30 29 21 15 14 10 5 4 0
4953 * +--+--+--+-----------------+---------+----+---------+------+--+------+
4954 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 | Rn |o3| mask |
4955 * +--+--+--+-----------------+---------+----+---------+------+--+------+
4956 */
4957 static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
4958 {
4959 int o3_mask = extract32(insn, 0, 5);
4960 int rn = extract32(insn, 5, 5);
4961 int o2 = extract32(insn, 15, 6);
4962 int sz = extract32(insn, 14, 1);
4963 int sf_op_s = extract32(insn, 29, 3);
4964 TCGv_i32 tmp;
4965 int shift;
4966
4967 if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
4968 !dc_isar_feature(aa64_condm_4, s)) {
4969 unallocated_encoding(s);
4970 return;
4971 }
4972 shift = sz ? 16 : 24; /* SETF16 or SETF8 */
4973
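/*
 * For SETF8 (shift = 24): NF gets Rn<7> in bit 31, ZF keeps Rn<7:0>
 * (zero means Z set), and VF ends up as Rn<8> ^ Rn<7> in bit 31.
 * SETF16 is the same with shift = 16.
 */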
4974 tmp = tcg_temp_new_i32();
4975 tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
4976 tcg_gen_shli_i32(cpu_NF, tmp, shift);
4977 tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
4978 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
4979 tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
4980 }
4981
4982 /* Conditional compare (immediate / register)
4983 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
4984 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4985 * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
4986 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4987 * [1] y [0] [0]
4988 */
4989 static void disas_cc(DisasContext *s, uint32_t insn)
4990 {
4991 unsigned int sf, op, y, cond, rn, nzcv, is_imm;
4992 TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
4993 TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
4994 DisasCompare c;
4995
4996 if (!extract32(insn, 29, 1)) {
4997 unallocated_encoding(s);
4998 return;
4999 }
5000 if (insn & (1 << 10 | 1 << 4)) {
5001 unallocated_encoding(s);
5002 return;
5003 }
5004 sf = extract32(insn, 31, 1);
5005 op = extract32(insn, 30, 1);
5006 is_imm = extract32(insn, 11, 1);
5007 y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
5008 cond = extract32(insn, 12, 4);
5009 rn = extract32(insn, 5, 5);
5010 nzcv = extract32(insn, 0, 4);
5011
5012 /* Set T0 = !COND. */
5013 tcg_t0 = tcg_temp_new_i32();
5014 arm_test_cc(&c, cond);
5015 tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
5016
5017 /* Load the arguments for the new comparison. */
5018 if (is_imm) {
5019 tcg_y = tcg_temp_new_i64();
5020 tcg_gen_movi_i64(tcg_y, y);
5021 } else {
5022 tcg_y = cpu_reg(s, y);
5023 }
5024 tcg_rn = cpu_reg(s, rn);
5025
5026 /* Set the flags for the new comparison. */
5027 tcg_tmp = tcg_temp_new_i64();
5028 if (op) {
5029 gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
5030 } else {
5031 gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
5032 }
5033
5034 /* If COND was false, force the flags to #nzcv. Compute two masks
5035 * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
5036 * For tcg hosts that support ANDC, we can make do with just T1.
5037 * In either case, allow the tcg optimizer to delete any unused mask.
5038 */
5039 tcg_t1 = tcg_temp_new_i32();
5040 tcg_t2 = tcg_temp_new_i32();
5041 tcg_gen_neg_i32(tcg_t1, tcg_t0);
5042 tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
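/* tcg_t0 is 0 or 1, so these are exactly (COND ? 0 : -1) and (COND ? -1 : 0). */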
5043
5044 if (nzcv & 8) { /* N */
5045 tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
5046 } else {
5047 if (TCG_TARGET_HAS_andc_i32) {
5048 tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
5049 } else {
5050 tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
5051 }
5052 }
5053 if (nzcv & 4) { /* Z */
5054 if (TCG_TARGET_HAS_andc_i32) {
5055 tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
5056 } else {
5057 tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
5058 }
5059 } else {
5060 tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
5061 }
5062 if (nzcv & 2) { /* C */
5063 tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
5064 } else {
5065 if (TCG_TARGET_HAS_andc_i32) {
5066 tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
5067 } else {
5068 tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
5069 }
5070 }
5071 if (nzcv & 1) { /* V */
5072 tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
5073 } else {
5074 if (TCG_TARGET_HAS_andc_i32) {
5075 tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
5076 } else {
5077 tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
5078 }
5079 }
5080 }
5081
5082 /* Conditional select
5083 * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
5084 * +----+----+---+-----------------+------+------+-----+------+------+
5085 * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
5086 * +----+----+---+-----------------+------+------+-----+------+------+
5087 */
5088 static void disas_cond_select(DisasContext *s, uint32_t insn)
5089 {
5090 unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
5091 TCGv_i64 tcg_rd, zero;
5092 DisasCompare64 c;
5093
5094 if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
5095 /* S == 1 or op2<1> == 1 */
5096 unallocated_encoding(s);
5097 return;
5098 }
5099 sf = extract32(insn, 31, 1);
5100 else_inv = extract32(insn, 30, 1);
5101 rm = extract32(insn, 16, 5);
5102 cond = extract32(insn, 12, 4);
5103 else_inc = extract32(insn, 10, 1);
5104 rn = extract32(insn, 5, 5);
5105 rd = extract32(insn, 0, 5);
5106
5107 tcg_rd = cpu_reg(s, rd);
5108
5109 a64_test_cc(&c, cond);
5110 zero = tcg_constant_i64(0);
5111
5112 if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
5113 /* CSET & CSETM. */
5114 tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
5115 if (else_inv) {
5116 tcg_gen_neg_i64(tcg_rd, tcg_rd);
5117 }
5118 } else {
5119 TCGv_i64 t_true = cpu_reg(s, rn);
5120 TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
5121 if (else_inv && else_inc) {
5122 tcg_gen_neg_i64(t_false, t_false);
5123 } else if (else_inv) {
5124 tcg_gen_not_i64(t_false, t_false);
5125 } else if (else_inc) {
5126 tcg_gen_addi_i64(t_false, t_false, 1);
5127 }
5128 tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
5129 }
5130
5131 if (!sf) {
5132 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5133 }
5134 }
5135
5136 static void handle_clz(DisasContext *s, unsigned int sf,
5137 unsigned int rn, unsigned int rd)
5138 {
5139 TCGv_i64 tcg_rd, tcg_rn;
5140 tcg_rd = cpu_reg(s, rd);
5141 tcg_rn = cpu_reg(s, rn);
5142
5143 if (sf) {
5144 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
5145 } else {
5146 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5147 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5148 tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
5149 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5150 }
5151 }
5152
5153 static void handle_cls(DisasContext *s, unsigned int sf,
5154 unsigned int rn, unsigned int rd)
5155 {
5156 TCGv_i64 tcg_rd, tcg_rn;
5157 tcg_rd = cpu_reg(s, rd);
5158 tcg_rn = cpu_reg(s, rn);
5159
5160 if (sf) {
5161 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
5162 } else {
5163 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5164 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5165 tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
5166 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5167 }
5168 }
5169
5170 static void handle_rbit(DisasContext *s, unsigned int sf,
5171 unsigned int rn, unsigned int rd)
5172 {
5173 TCGv_i64 tcg_rd, tcg_rn;
5174 tcg_rd = cpu_reg(s, rd);
5175 tcg_rn = cpu_reg(s, rn);
5176
5177 if (sf) {
5178 gen_helper_rbit64(tcg_rd, tcg_rn);
5179 } else {
5180 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5181 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5182 gen_helper_rbit(tcg_tmp32, tcg_tmp32);
5183 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5184 }
5185 }
5186
5187 /* REV with sf==1, opcode==3 ("REV64") */
5188 static void handle_rev64(DisasContext *s, unsigned int sf,
5189 unsigned int rn, unsigned int rd)
5190 {
5191 if (!sf) {
5192 unallocated_encoding(s);
5193 return;
5194 }
5195 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
5196 }
5197
5198 /* REV with sf==0, opcode==2
5199 * REV32 (sf==1, opcode==2)
5200 */
5201 static void handle_rev32(DisasContext *s, unsigned int sf,
5202 unsigned int rn, unsigned int rd)
5203 {
5204 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5205 TCGv_i64 tcg_rn = cpu_reg(s, rn);
5206
5207 if (sf) {
5208 tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
5209 tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
5210 } else {
5211 tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
5212 }
5213 }
5214
5215 /* REV16 (opcode==1) */
5216 static void handle_rev16(DisasContext *s, unsigned int sf,
5217 unsigned int rn, unsigned int rd)
5218 {
5219 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5220 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
5221 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5222 TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
5223
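/*
 * Swap the two bytes of each halfword: the mask keeps the low byte of
 * each halfword from Rn and from Rn >> 8; the former is then shifted
 * up by 8 and the two halves recombined.
 */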
5224 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
5225 tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
5226 tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
5227 tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
5228 tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
5229 }
5230
5231 /* Data-processing (1 source)
5232 * 31 30 29 28 21 20 16 15 10 9 5 4 0
5233 * +----+---+---+-----------------+---------+--------+------+------+
5234 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
5235 * +----+---+---+-----------------+---------+--------+------+------+
5236 */
5237 static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
5238 {
5239 unsigned int sf, opcode, opcode2, rn, rd;
5240 TCGv_i64 tcg_rd;
5241
5242 if (extract32(insn, 29, 1)) {
5243 unallocated_encoding(s);
5244 return;
5245 }
5246
5247 sf = extract32(insn, 31, 1);
5248 opcode = extract32(insn, 10, 6);
5249 opcode2 = extract32(insn, 16, 5);
5250 rn = extract32(insn, 5, 5);
5251 rd = extract32(insn, 0, 5);
5252
5253 #define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))
5254
5255 switch (MAP(sf, opcode2, opcode)) {
5256 case MAP(0, 0x00, 0x00): /* RBIT */
5257 case MAP(1, 0x00, 0x00):
5258 handle_rbit(s, sf, rn, rd);
5259 break;
5260 case MAP(0, 0x00, 0x01): /* REV16 */
5261 case MAP(1, 0x00, 0x01):
5262 handle_rev16(s, sf, rn, rd);
5263 break;
5264 case MAP(0, 0x00, 0x02): /* REV/REV32 */
5265 case MAP(1, 0x00, 0x02):
5266 handle_rev32(s, sf, rn, rd);
5267 break;
5268 case MAP(1, 0x00, 0x03): /* REV64 */
5269 handle_rev64(s, sf, rn, rd);
5270 break;
5271 case MAP(0, 0x00, 0x04): /* CLZ */
5272 case MAP(1, 0x00, 0x04):
5273 handle_clz(s, sf, rn, rd);
5274 break;
5275 case MAP(0, 0x00, 0x05): /* CLS */
5276 case MAP(1, 0x00, 0x05):
5277 handle_cls(s, sf, rn, rd);
5278 break;
5279 case MAP(1, 0x01, 0x00): /* PACIA */
5280 if (s->pauth_active) {
5281 tcg_rd = cpu_reg(s, rd);
5282 gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5283 } else if (!dc_isar_feature(aa64_pauth, s)) {
5284 goto do_unallocated;
5285 }
5286 break;
5287 case MAP(1, 0x01, 0x01): /* PACIB */
5288 if (s->pauth_active) {
5289 tcg_rd = cpu_reg(s, rd);
5290 gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5291 } else if (!dc_isar_feature(aa64_pauth, s)) {
5292 goto do_unallocated;
5293 }
5294 break;
5295 case MAP(1, 0x01, 0x02): /* PACDA */
5296 if (s->pauth_active) {
5297 tcg_rd = cpu_reg(s, rd);
5298 gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5299 } else if (!dc_isar_feature(aa64_pauth, s)) {
5300 goto do_unallocated;
5301 }
5302 break;
5303 case MAP(1, 0x01, 0x03): /* PACDB */
5304 if (s->pauth_active) {
5305 tcg_rd = cpu_reg(s, rd);
5306 gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5307 } else if (!dc_isar_feature(aa64_pauth, s)) {
5308 goto do_unallocated;
5309 }
5310 break;
5311 case MAP(1, 0x01, 0x04): /* AUTIA */
5312 if (s->pauth_active) {
5313 tcg_rd = cpu_reg(s, rd);
5314 gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5315 } else if (!dc_isar_feature(aa64_pauth, s)) {
5316 goto do_unallocated;
5317 }
5318 break;
5319 case MAP(1, 0x01, 0x05): /* AUTIB */
5320 if (s->pauth_active) {
5321 tcg_rd = cpu_reg(s, rd);
5322 gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5323 } else if (!dc_isar_feature(aa64_pauth, s)) {
5324 goto do_unallocated;
5325 }
5326 break;
5327 case MAP(1, 0x01, 0x06): /* AUTDA */
5328 if (s->pauth_active) {
5329 tcg_rd = cpu_reg(s, rd);
5330 gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5331 } else if (!dc_isar_feature(aa64_pauth, s)) {
5332 goto do_unallocated;
5333 }
5334 break;
5335 case MAP(1, 0x01, 0x07): /* AUTDB */
5336 if (s->pauth_active) {
5337 tcg_rd = cpu_reg(s, rd);
5338 gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5339 } else if (!dc_isar_feature(aa64_pauth, s)) {
5340 goto do_unallocated;
5341 }
5342 break;
5343 case MAP(1, 0x01, 0x08): /* PACIZA */
5344 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5345 goto do_unallocated;
5346 } else if (s->pauth_active) {
5347 tcg_rd = cpu_reg(s, rd);
5348 gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5349 }
5350 break;
5351 case MAP(1, 0x01, 0x09): /* PACIZB */
5352 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5353 goto do_unallocated;
5354 } else if (s->pauth_active) {
5355 tcg_rd = cpu_reg(s, rd);
5356 gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5357 }
5358 break;
5359 case MAP(1, 0x01, 0x0a): /* PACDZA */
5360 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5361 goto do_unallocated;
5362 } else if (s->pauth_active) {
5363 tcg_rd = cpu_reg(s, rd);
5364 gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5365 }
5366 break;
5367 case MAP(1, 0x01, 0x0b): /* PACDZB */
5368 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5369 goto do_unallocated;
5370 } else if (s->pauth_active) {
5371 tcg_rd = cpu_reg(s, rd);
5372 gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5373 }
5374 break;
5375 case MAP(1, 0x01, 0x0c): /* AUTIZA */
5376 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5377 goto do_unallocated;
5378 } else if (s->pauth_active) {
5379 tcg_rd = cpu_reg(s, rd);
5380 gen_helper_autia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5381 }
5382 break;
5383 case MAP(1, 0x01, 0x0d): /* AUTIZB */
5384 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5385 goto do_unallocated;
5386 } else if (s->pauth_active) {
5387 tcg_rd = cpu_reg(s, rd);
5388 gen_helper_autib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5389 }
5390 break;
5391 case MAP(1, 0x01, 0x0e): /* AUTDZA */
5392 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5393 goto do_unallocated;
5394 } else if (s->pauth_active) {
5395 tcg_rd = cpu_reg(s, rd);
5396 gen_helper_autda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5397 }
5398 break;
5399 case MAP(1, 0x01, 0x0f): /* AUTDZB */
5400 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5401 goto do_unallocated;
5402 } else if (s->pauth_active) {
5403 tcg_rd = cpu_reg(s, rd);
5404 gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5405 }
5406 break;
5407 case MAP(1, 0x01, 0x10): /* XPACI */
5408 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5409 goto do_unallocated;
5410 } else if (s->pauth_active) {
5411 tcg_rd = cpu_reg(s, rd);
5412 gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
5413 }
5414 break;
5415 case MAP(1, 0x01, 0x11): /* XPACD */
5416 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5417 goto do_unallocated;
5418 } else if (s->pauth_active) {
5419 tcg_rd = cpu_reg(s, rd);
5420 gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
5421 }
5422 break;
5423 default:
5424 do_unallocated:
5425 unallocated_encoding(s);
5426 break;
5427 }
5428
5429 #undef MAP
5430 }
5431
5432 static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
5433 unsigned int rm, unsigned int rn, unsigned int rd)
5434 {
5435 TCGv_i64 tcg_n, tcg_m, tcg_rd;
5436 tcg_rd = cpu_reg(s, rd);
5437
5438 if (!sf && is_signed) {
5439 tcg_n = tcg_temp_new_i64();
5440 tcg_m = tcg_temp_new_i64();
5441 tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
5442 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
5443 } else {
5444 tcg_n = read_cpu_reg(s, rn, sf);
5445 tcg_m = read_cpu_reg(s, rm, sf);
5446 }
5447
5448 if (is_signed) {
5449 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
5450 } else {
5451 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
5452 }
5453
5454 if (!sf) { /* zero extend final result */
5455 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5456 }
5457 }
5458
5459 /* LSLV, LSRV, ASRV, RORV */
5460 static void handle_shift_reg(DisasContext *s,
5461 enum a64_shift_type shift_type, unsigned int sf,
5462 unsigned int rm, unsigned int rn, unsigned int rd)
5463 {
5464 TCGv_i64 tcg_shift = tcg_temp_new_i64();
5465 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5466 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5467
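/* The variable shift amount is taken modulo the data size (& 63 or & 31), as the architecture requires. */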
5468 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
5469 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
5470 }
5471
5472 /* CRC32[BHWX], CRC32C[BHWX] */
5473 static void handle_crc32(DisasContext *s,
5474 unsigned int sf, unsigned int sz, bool crc32c,
5475 unsigned int rm, unsigned int rn, unsigned int rd)
5476 {
5477 TCGv_i64 tcg_acc, tcg_val;
5478 TCGv_i32 tcg_bytes;
5479
5480 if (!dc_isar_feature(aa64_crc32, s)
5481 || (sf == 1 && sz != 3)
5482 || (sf == 0 && sz == 3)) {
5483 unallocated_encoding(s);
5484 return;
5485 }
5486
5487 if (sz == 3) {
5488 tcg_val = cpu_reg(s, rm);
5489 } else {
5490 uint64_t mask;
5491 switch (sz) {
5492 case 0:
5493 mask = 0xFF;
5494 break;
5495 case 1:
5496 mask = 0xFFFF;
5497 break;
5498 case 2:
5499 mask = 0xFFFFFFFF;
5500 break;
5501 default:
5502 g_assert_not_reached();
5503 }
5504 tcg_val = tcg_temp_new_i64();
5505 tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
5506 }
5507
5508 tcg_acc = cpu_reg(s, rn);
5509 tcg_bytes = tcg_constant_i32(1 << sz);
5510
5511 if (crc32c) {
5512 gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
5513 } else {
5514 gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
5515 }
5516 }
5517
5518 /* Data-processing (2 source)
5519 * 31 30 29 28 21 20 16 15 10 9 5 4 0
5520 * +----+---+---+-----------------+------+--------+------+------+
5521 * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
5522 * +----+---+---+-----------------+------+--------+------+------+
5523 */
5524 static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
5525 {
5526 unsigned int sf, rm, opcode, rn, rd, setflag;
5527 sf = extract32(insn, 31, 1);
5528 setflag = extract32(insn, 29, 1);
5529 rm = extract32(insn, 16, 5);
5530 opcode = extract32(insn, 10, 6);
5531 rn = extract32(insn, 5, 5);
5532 rd = extract32(insn, 0, 5);
5533
5534 if (setflag && opcode != 0) {
5535 unallocated_encoding(s);
5536 return;
5537 }
5538
5539 switch (opcode) {
5540 case 0: /* SUBP(S) */
5541 if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
5542 goto do_unallocated;
5543 } else {
5544 TCGv_i64 tcg_n, tcg_m, tcg_d;
5545
5546 tcg_n = read_cpu_reg_sp(s, rn, true);
5547 tcg_m = read_cpu_reg_sp(s, rm, true);
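/* SUBP operates on the 56-bit address parts, sign-extended from bit 55; the tag byte does not participate. */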
5548 tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
5549 tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
5550 tcg_d = cpu_reg(s, rd);
5551
5552 if (setflag) {
5553 gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
5554 } else {
5555 tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
5556 }
5557 }
5558 break;
5559 case 2: /* UDIV */
5560 handle_div(s, false, sf, rm, rn, rd);
5561 break;
5562 case 3: /* SDIV */
5563 handle_div(s, true, sf, rm, rn, rd);
5564 break;
5565 case 4: /* IRG */
5566 if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
5567 goto do_unallocated;
5568 }
5569 if (s->ata) {
5570 gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
5571 cpu_reg_sp(s, rn), cpu_reg(s, rm));
5572 } else {
5573 gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
5574 cpu_reg_sp(s, rn));
5575 }
5576 break;
5577 case 5: /* GMI */
5578 if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
5579 goto do_unallocated;
5580 } else {
5581 TCGv_i64 t = tcg_temp_new_i64();
5582
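/* GMI: set the bit indexed by Xn's allocation tag (bits 59:56) in the exclusion mask Xm, writing the result to Xd. */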
5583 tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4);
5584 tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
5585 tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t);
5586 }
5587 break;
5588 case 8: /* LSLV */
5589 handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
5590 break;
5591 case 9: /* LSRV */
5592 handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
5593 break;
5594 case 10: /* ASRV */
5595 handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
5596 break;
5597 case 11: /* RORV */
5598 handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
5599 break;
5600 case 12: /* PACGA */
5601 if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
5602 goto do_unallocated;
5603 }
5604 gen_helper_pacga(cpu_reg(s, rd), cpu_env,
5605 cpu_reg(s, rn), cpu_reg_sp(s, rm));
5606 break;
5607 case 16:
5608 case 17:
5609 case 18:
5610 case 19:
5611 case 20:
5612 case 21:
5613 case 22:
5614 case 23: /* CRC32 */
5615 {
5616 int sz = extract32(opcode, 0, 2);
5617 bool crc32c = extract32(opcode, 2, 1);
5618 handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
5619 break;
5620 }
5621 default:
5622 do_unallocated:
5623 unallocated_encoding(s);
5624 break;
5625 }
5626 }
5627
5628 /*
5629 * Data processing - register
5630 * 31 30 29 28 25 21 20 16 10 0
5631 * +--+---+--+---+-------+-----+-------+-------+---------+
5632 * | |op0| |op1| 1 0 1 | op2 | | op3 | |
5633 * +--+---+--+---+-------+-----+-------+-------+---------+
5634 */
5635 static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
5636 {
5637 int op0 = extract32(insn, 30, 1);
5638 int op1 = extract32(insn, 28, 1);
5639 int op2 = extract32(insn, 21, 4);
5640 int op3 = extract32(insn, 10, 6);
5641
5642 if (!op1) {
5643 if (op2 & 8) {
5644 if (op2 & 1) {
5645 /* Add/sub (extended register) */
5646 disas_add_sub_ext_reg(s, insn);
5647 } else {
5648 /* Add/sub (shifted register) */
5649 disas_add_sub_reg(s, insn);
5650 }
5651 } else {
5652 /* Logical (shifted register) */
5653 disas_logic_reg(s, insn);
5654 }
5655 return;
5656 }
5657
5658 switch (op2) {
5659 case 0x0:
5660 switch (op3) {
5661 case 0x00: /* Add/subtract (with carry) */
5662 disas_adc_sbc(s, insn);
5663 break;
5664
5665 case 0x01: /* Rotate right into flags */
5666 case 0x21:
5667 disas_rotate_right_into_flags(s, insn);
5668 break;
5669
5670 case 0x02: /* Evaluate into flags */
5671 case 0x12:
5672 case 0x22:
5673 case 0x32:
5674 disas_evaluate_into_flags(s, insn);
5675 break;
5676
5677 default:
5678 goto do_unallocated;
5679 }
5680 break;
5681
5682 case 0x2: /* Conditional compare */
5683 disas_cc(s, insn); /* both imm and reg forms */
5684 break;
5685
5686 case 0x4: /* Conditional select */
5687 disas_cond_select(s, insn);
5688 break;
5689
5690 case 0x6: /* Data-processing */
5691 if (op0) { /* (1 source) */
5692 disas_data_proc_1src(s, insn);
5693 } else { /* (2 source) */
5694 disas_data_proc_2src(s, insn);
5695 }
5696 break;
5697 case 0x8 ... 0xf: /* (3 source) */
5698 disas_data_proc_3src(s, insn);
5699 break;
5700
5701 default:
5702 do_unallocated:
5703 unallocated_encoding(s);
5704 break;
5705 }
5706 }
5707
5708 static void handle_fp_compare(DisasContext *s, int size,
5709 unsigned int rn, unsigned int rm,
5710 bool cmp_with_zero, bool signal_all_nans)
5711 {
5712 TCGv_i64 tcg_flags = tcg_temp_new_i64();
5713 TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
5714
5715 if (size == MO_64) {
5716 TCGv_i64 tcg_vn, tcg_vm;
5717
5718 tcg_vn = read_fp_dreg(s, rn);
5719 if (cmp_with_zero) {
5720 tcg_vm = tcg_constant_i64(0);
5721 } else {
5722 tcg_vm = read_fp_dreg(s, rm);
5723 }
5724 if (signal_all_nans) {
5725 gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5726 } else {
5727 gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5728 }
5729 } else {
5730 TCGv_i32 tcg_vn = tcg_temp_new_i32();
5731 TCGv_i32 tcg_vm = tcg_temp_new_i32();
5732
5733 read_vec_element_i32(s, tcg_vn, rn, 0, size);
5734 if (cmp_with_zero) {
5735 tcg_gen_movi_i32(tcg_vm, 0);
5736 } else {
5737 read_vec_element_i32(s, tcg_vm, rm, 0, size);
5738 }
5739
5740 switch (size) {
5741 case MO_32:
5742 if (signal_all_nans) {
5743 gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5744 } else {
5745 gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5746 }
5747 break;
5748 case MO_16:
5749 if (signal_all_nans) {
5750 gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5751 } else {
5752 gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5753 }
5754 break;
5755 default:
5756 g_assert_not_reached();
5757 }
5758 }
5759
5760 gen_set_nzcv(tcg_flags);
5761 }
5762
5763 /* Floating point compare
5764 * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
5765 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
5766 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
5767 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
5768 */
5769 static void disas_fp_compare(DisasContext *s, uint32_t insn)
5770 {
5771 unsigned int mos, type, rm, op, rn, opc, op2r;
5772 int size;
5773
5774 mos = extract32(insn, 29, 3);
5775 type = extract32(insn, 22, 2);
5776 rm = extract32(insn, 16, 5);
5777 op = extract32(insn, 14, 2);
5778 rn = extract32(insn, 5, 5);
5779 opc = extract32(insn, 3, 2);
5780 op2r = extract32(insn, 0, 3);
5781
5782 if (mos || op || op2r) {
5783 unallocated_encoding(s);
5784 return;
5785 }
5786
5787 switch (type) {
5788 case 0:
5789 size = MO_32;
5790 break;
5791 case 1:
5792 size = MO_64;
5793 break;
5794 case 3:
5795 size = MO_16;
5796 if (dc_isar_feature(aa64_fp16, s)) {
5797 break;
5798 }
5799 /* fallthru */
5800 default:
5801 unallocated_encoding(s);
5802 return;
5803 }
5804
5805 if (!fp_access_check(s)) {
5806 return;
5807 }
5808
5809 handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
5810 }
5811
5812 /* Floating point conditional compare
5813 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
5814 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
5815 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
5816 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
5817 */
5818 static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
5819 {
5820 unsigned int mos, type, rm, cond, rn, op, nzcv;
5821 TCGLabel *label_continue = NULL;
5822 int size;
5823
5824 mos = extract32(insn, 29, 3);
5825 type = extract32(insn, 22, 2);
5826 rm = extract32(insn, 16, 5);
5827 cond = extract32(insn, 12, 4);
5828 rn = extract32(insn, 5, 5);
5829 op = extract32(insn, 4, 1);
5830 nzcv = extract32(insn, 0, 4);
5831
5832 if (mos) {
5833 unallocated_encoding(s);
5834 return;
5835 }
5836
5837 switch (type) {
5838 case 0:
5839 size = MO_32;
5840 break;
5841 case 1:
5842 size = MO_64;
5843 break;
5844 case 3:
5845 size = MO_16;
5846 if (dc_isar_feature(aa64_fp16, s)) {
5847 break;
5848 }
5849 /* fallthru */
5850 default:
5851 unallocated_encoding(s);
5852 return;
5853 }
5854
5855 if (!fp_access_check(s)) {
5856 return;
5857 }
5858
5859 if (cond < 0x0e) { /* not always */
5860 TCGLabel *label_match = gen_new_label();
5861 label_continue = gen_new_label();
5862 arm_gen_test_cc(cond, label_match);
5863 /* nomatch: */
5864 gen_set_nzcv(tcg_constant_i64(nzcv << 28));
5865 tcg_gen_br(label_continue);
5866 gen_set_label(label_match);
5867 }
5868
5869 handle_fp_compare(s, size, rn, rm, false, op);
5870
5871 if (cond < 0x0e) {
5872 gen_set_label(label_continue);
5873 }
5874 }
5875
5876 /* Floating point conditional select
5877 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
5878 * +---+---+---+-----------+------+---+------+------+-----+------+------+
5879 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
5880 * +---+---+---+-----------+------+---+------+------+-----+------+------+
5881 */
5882 static void disas_fp_csel(DisasContext *s, uint32_t insn)
5883 {
5884 unsigned int mos, type, rm, cond, rn, rd;
5885 TCGv_i64 t_true, t_false;
5886 DisasCompare64 c;
5887 MemOp sz;
5888
5889 mos = extract32(insn, 29, 3);
5890 type = extract32(insn, 22, 2);
5891 rm = extract32(insn, 16, 5);
5892 cond = extract32(insn, 12, 4);
5893 rn = extract32(insn, 5, 5);
5894 rd = extract32(insn, 0, 5);
5895
5896 if (mos) {
5897 unallocated_encoding(s);
5898 return;
5899 }
5900
5901 switch (type) {
5902 case 0:
5903 sz = MO_32;
5904 break;
5905 case 1:
5906 sz = MO_64;
5907 break;
5908 case 3:
5909 sz = MO_16;
5910 if (dc_isar_feature(aa64_fp16, s)) {
5911 break;
5912 }
5913 /* fallthru */
5914 default:
5915 unallocated_encoding(s);
5916 return;
5917 }
5918
5919 if (!fp_access_check(s)) {
5920 return;
5921 }
5922
5923 /* Zero extend sreg & hreg inputs to 64 bits now. */
5924 t_true = tcg_temp_new_i64();
5925 t_false = tcg_temp_new_i64();
5926 read_vec_element(s, t_true, rn, 0, sz);
5927 read_vec_element(s, t_false, rm, 0, sz);
5928
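    /*
     * a64_test_cc yields a comparison (c.cond applied to c.value vs 0)
     * that holds when 'cond' passes; the movcond below then keeps the
     * Rn value in that case and selects the Rm value otherwise.
     */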
5929 a64_test_cc(&c, cond);
5930 tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
5931 t_true, t_false);
5932
5933 /* Note that sregs & hregs write back zeros to the high bits,
5934 * and we've already done the zero-extension. */
5935 write_fp_dreg(s, rd, t_true);
5936 }
5937
5938 /* Floating-point data-processing (1 source) - half precision */
5939 static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
5940 {
5941 TCGv_ptr fpst = NULL;
5942 TCGv_i32 tcg_op = read_fp_hreg(s, rn);
5943 TCGv_i32 tcg_res = tcg_temp_new_i32();
5944
5945 switch (opcode) {
5946 case 0x0: /* FMOV */
5947 tcg_gen_mov_i32(tcg_res, tcg_op);
5948 break;
5949 case 0x1: /* FABS */
5950 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
5951 break;
5952 case 0x2: /* FNEG */
5953 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
5954 break;
5955 case 0x3: /* FSQRT */
5956 fpst = fpstatus_ptr(FPST_FPCR_F16);
5957 gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
5958 break;
5959 case 0x8: /* FRINTN */
5960 case 0x9: /* FRINTP */
5961 case 0xa: /* FRINTM */
5962 case 0xb: /* FRINTZ */
5963 case 0xc: /* FRINTA */
5964 {
5965 TCGv_i32 tcg_rmode;
5966
5967 fpst = fpstatus_ptr(FPST_FPCR_F16);
5968 tcg_rmode = gen_set_rmode(opcode & 7, fpst);
5969 gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
5970 gen_restore_rmode(tcg_rmode, fpst);
5971 break;
5972 }
5973 case 0xe: /* FRINTX */
5974 fpst = fpstatus_ptr(FPST_FPCR_F16);
5975 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
5976 break;
5977 case 0xf: /* FRINTI */
5978 fpst = fpstatus_ptr(FPST_FPCR_F16);
5979 gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
5980 break;
5981 default:
5982 g_assert_not_reached();
5983 }
5984
5985 write_fp_sreg(s, rd, tcg_res);
5986 }
5987
5988 /* Floating-point data-processing (1 source) - single precision */
5989 static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
5990 {
5991 void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
5992 TCGv_i32 tcg_op, tcg_res;
5993 TCGv_ptr fpst;
5994 int rmode = -1;
5995
5996 tcg_op = read_fp_sreg(s, rn);
5997 tcg_res = tcg_temp_new_i32();
5998
5999 switch (opcode) {
6000 case 0x0: /* FMOV */
6001 tcg_gen_mov_i32(tcg_res, tcg_op);
6002 goto done;
6003 case 0x1: /* FABS */
6004 gen_helper_vfp_abss(tcg_res, tcg_op);
6005 goto done;
6006 case 0x2: /* FNEG */
6007 gen_helper_vfp_negs(tcg_res, tcg_op);
6008 goto done;
6009 case 0x3: /* FSQRT */
6010 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
6011 goto done;
6012 case 0x6: /* BFCVT */
6013 gen_fpst = gen_helper_bfcvt;
6014 break;
6015 case 0x8: /* FRINTN */
6016 case 0x9: /* FRINTP */
6017 case 0xa: /* FRINTM */
6018 case 0xb: /* FRINTZ */
6019 case 0xc: /* FRINTA */
6020 rmode = opcode & 7;
6021 gen_fpst = gen_helper_rints;
6022 break;
6023 case 0xe: /* FRINTX */
6024 gen_fpst = gen_helper_rints_exact;
6025 break;
6026 case 0xf: /* FRINTI */
6027 gen_fpst = gen_helper_rints;
6028 break;
6029 case 0x10: /* FRINT32Z */
6030 rmode = FPROUNDING_ZERO;
6031 gen_fpst = gen_helper_frint32_s;
6032 break;
6033 case 0x11: /* FRINT32X */
6034 gen_fpst = gen_helper_frint32_s;
6035 break;
6036 case 0x12: /* FRINT64Z */
6037 rmode = FPROUNDING_ZERO;
6038 gen_fpst = gen_helper_frint64_s;
6039 break;
6040 case 0x13: /* FRINT64X */
6041 gen_fpst = gen_helper_frint64_s;
6042 break;
6043 default:
6044 g_assert_not_reached();
6045 }
6046
6047 fpst = fpstatus_ptr(FPST_FPCR);
6048 if (rmode >= 0) {
6049 TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
6050 gen_fpst(tcg_res, tcg_op, fpst);
6051 gen_restore_rmode(tcg_rmode, fpst);
6052 } else {
6053 gen_fpst(tcg_res, tcg_op, fpst);
6054 }
6055
6056 done:
6057 write_fp_sreg(s, rd, tcg_res);
6058 }
6059
6060 /* Floating-point data-processing (1 source) - double precision */
6061 static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
6062 {
6063 void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
6064 TCGv_i64 tcg_op, tcg_res;
6065 TCGv_ptr fpst;
6066 int rmode = -1;
6067
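    /*
     * FMOV is special-cased before reading the source into a TCG temp:
     * with is_q = false the gvec move copies only the low 64 bits and,
     * like write_fp_dreg, leaves the rest of the vector register zeroed.
     */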
6068 switch (opcode) {
6069 case 0x0: /* FMOV */
6070 gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
6071 return;
6072 }
6073
6074 tcg_op = read_fp_dreg(s, rn);
6075 tcg_res = tcg_temp_new_i64();
6076
6077 switch (opcode) {
6078 case 0x1: /* FABS */
6079 gen_helper_vfp_absd(tcg_res, tcg_op);
6080 goto done;
6081 case 0x2: /* FNEG */
6082 gen_helper_vfp_negd(tcg_res, tcg_op);
6083 goto done;
6084 case 0x3: /* FSQRT */
6085 gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
6086 goto done;
6087 case 0x8: /* FRINTN */
6088 case 0x9: /* FRINTP */
6089 case 0xa: /* FRINTM */
6090 case 0xb: /* FRINTZ */
6091 case 0xc: /* FRINTA */
6092 rmode = opcode & 7;
6093 gen_fpst = gen_helper_rintd;
6094 break;
6095 case 0xe: /* FRINTX */
6096 gen_fpst = gen_helper_rintd_exact;
6097 break;
6098 case 0xf: /* FRINTI */
6099 gen_fpst = gen_helper_rintd;
6100 break;
6101 case 0x10: /* FRINT32Z */
6102 rmode = FPROUNDING_ZERO;
6103 gen_fpst = gen_helper_frint32_d;
6104 break;
6105 case 0x11: /* FRINT32X */
6106 gen_fpst = gen_helper_frint32_d;
6107 break;
6108 case 0x12: /* FRINT64Z */
6109 rmode = FPROUNDING_ZERO;
6110 gen_fpst = gen_helper_frint64_d;
6111 break;
6112 case 0x13: /* FRINT64X */
6113 gen_fpst = gen_helper_frint64_d;
6114 break;
6115 default:
6116 g_assert_not_reached();
6117 }
6118
6119 fpst = fpstatus_ptr(FPST_FPCR);
6120 if (rmode >= 0) {
6121 TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
6122 gen_fpst(tcg_res, tcg_op, fpst);
6123 gen_restore_rmode(tcg_rmode, fpst);
6124 } else {
6125 gen_fpst(tcg_res, tcg_op, fpst);
6126 }
6127
6128 done:
6129 write_fp_dreg(s, rd, tcg_res);
6130 }
6131
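/*
 * FCVT between precisions: ntype is the source and dtype the destination,
 * both using the usual FP "type" encoding (0 = single, 1 = double,
 * 3 = half); the half-precision paths also pass the FPCR.AHP flag.
 */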
6132 static void handle_fp_fcvt(DisasContext *s, int opcode,
6133 int rd, int rn, int dtype, int ntype)
6134 {
6135 switch (ntype) {
6136 case 0x0:
6137 {
6138 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
6139 if (dtype == 1) {
6140 /* Single to double */
6141 TCGv_i64 tcg_rd = tcg_temp_new_i64();
6142 gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
6143 write_fp_dreg(s, rd, tcg_rd);
6144 } else {
6145 /* Single to half */
6146 TCGv_i32 tcg_rd = tcg_temp_new_i32();
6147 TCGv_i32 ahp = get_ahp_flag();
6148 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6149
6150 gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
6151 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
6152 write_fp_sreg(s, rd, tcg_rd);
6153 }
6154 break;
6155 }
6156 case 0x1:
6157 {
6158 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
6159 TCGv_i32 tcg_rd = tcg_temp_new_i32();
6160 if (dtype == 0) {
6161 /* Double to single */
6162 gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
6163 } else {
6164 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6165 TCGv_i32 ahp = get_ahp_flag();
6166 /* Double to half */
6167 gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
6168 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
6169 }
6170 write_fp_sreg(s, rd, tcg_rd);
6171 break;
6172 }
6173 case 0x3:
6174 {
6175 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
6176 TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
6177 TCGv_i32 tcg_ahp = get_ahp_flag();
6178 tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
6179 if (dtype == 0) {
6180 /* Half to single */
6181 TCGv_i32 tcg_rd = tcg_temp_new_i32();
6182 gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
6183 write_fp_sreg(s, rd, tcg_rd);
6184 } else {
6185 /* Half to double */
6186 TCGv_i64 tcg_rd = tcg_temp_new_i64();
6187 gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
6188 write_fp_dreg(s, rd, tcg_rd);
6189 }
6190 break;
6191 }
6192 default:
6193 g_assert_not_reached();
6194 }
6195 }
6196
6197 /* Floating point data-processing (1 source)
6198 * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
6199 * +---+---+---+-----------+------+---+--------+-----------+------+------+
6200 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
6201 * +---+---+---+-----------+------+---+--------+-----------+------+------+
6202 */
6203 static void disas_fp_1src(DisasContext *s, uint32_t insn)
6204 {
6205 int mos = extract32(insn, 29, 3);
6206 int type = extract32(insn, 22, 2);
6207 int opcode = extract32(insn, 15, 6);
6208 int rn = extract32(insn, 5, 5);
6209 int rd = extract32(insn, 0, 5);
6210
6211 if (mos) {
6212 goto do_unallocated;
6213 }
6214
6215 switch (opcode) {
6216 case 0x4: case 0x5: case 0x7:
6217 {
6218 /* FCVT between half, single and double precision */
6219 int dtype = extract32(opcode, 0, 2);
6220 if (type == 2 || dtype == type) {
6221 goto do_unallocated;
6222 }
6223 if (!fp_access_check(s)) {
6224 return;
6225 }
6226
6227 handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
6228 break;
6229 }
6230
6231 case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
6232 if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
6233 goto do_unallocated;
6234 }
6235 /* fall through */
6236 case 0x0 ... 0x3:
6237 case 0x8 ... 0xc:
6238 case 0xe ... 0xf:
6239 /* 32-to-32 and 64-to-64 ops */
6240 switch (type) {
6241 case 0:
6242 if (!fp_access_check(s)) {
6243 return;
6244 }
6245 handle_fp_1src_single(s, opcode, rd, rn);
6246 break;
6247 case 1:
6248 if (!fp_access_check(s)) {
6249 return;
6250 }
6251 handle_fp_1src_double(s, opcode, rd, rn);
6252 break;
6253 case 3:
6254 if (!dc_isar_feature(aa64_fp16, s)) {
6255 goto do_unallocated;
6256 }
6257
6258 if (!fp_access_check(s)) {
6259 return;
6260 }
6261 handle_fp_1src_half(s, opcode, rd, rn);
6262 break;
6263 default:
6264 goto do_unallocated;
6265 }
6266 break;
6267
6268 case 0x6:
6269 switch (type) {
6270 case 1: /* BFCVT */
6271 if (!dc_isar_feature(aa64_bf16, s)) {
6272 goto do_unallocated;
6273 }
6274 if (!fp_access_check(s)) {
6275 return;
6276 }
6277 handle_fp_1src_single(s, opcode, rd, rn);
6278 break;
6279 default:
6280 goto do_unallocated;
6281 }
6282 break;
6283
6284 default:
6285 do_unallocated:
6286 unallocated_encoding(s);
6287 break;
6288 }
6289 }
6290
6291 /* Floating-point data-processing (2 source) - single precision */
6292 static void handle_fp_2src_single(DisasContext *s, int opcode,
6293 int rd, int rn, int rm)
6294 {
6295 TCGv_i32 tcg_op1;
6296 TCGv_i32 tcg_op2;
6297 TCGv_i32 tcg_res;
6298 TCGv_ptr fpst;
6299
6300 tcg_res = tcg_temp_new_i32();
6301 fpst = fpstatus_ptr(FPST_FPCR);
6302 tcg_op1 = read_fp_sreg(s, rn);
6303 tcg_op2 = read_fp_sreg(s, rm);
6304
6305 switch (opcode) {
6306 case 0x0: /* FMUL */
6307 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6308 break;
6309 case 0x1: /* FDIV */
6310 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
6311 break;
6312 case 0x2: /* FADD */
6313 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6314 break;
6315 case 0x3: /* FSUB */
6316 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
6317 break;
6318 case 0x4: /* FMAX */
6319 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6320 break;
6321 case 0x5: /* FMIN */
6322 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6323 break;
6324 case 0x6: /* FMAXNM */
6325 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6326 break;
6327 case 0x7: /* FMINNM */
6328 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6329 break;
6330 case 0x8: /* FNMUL */
6331 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6332 gen_helper_vfp_negs(tcg_res, tcg_res);
6333 break;
6334 }
6335
6336 write_fp_sreg(s, rd, tcg_res);
6337 }
6338
6339 /* Floating-point data-processing (2 source) - double precision */
6340 static void handle_fp_2src_double(DisasContext *s, int opcode,
6341 int rd, int rn, int rm)
6342 {
6343 TCGv_i64 tcg_op1;
6344 TCGv_i64 tcg_op2;
6345 TCGv_i64 tcg_res;
6346 TCGv_ptr fpst;
6347
6348 tcg_res = tcg_temp_new_i64();
6349 fpst = fpstatus_ptr(FPST_FPCR);
6350 tcg_op1 = read_fp_dreg(s, rn);
6351 tcg_op2 = read_fp_dreg(s, rm);
6352
6353 switch (opcode) {
6354 case 0x0: /* FMUL */
6355 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6356 break;
6357 case 0x1: /* FDIV */
6358 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
6359 break;
6360 case 0x2: /* FADD */
6361 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6362 break;
6363 case 0x3: /* FSUB */
6364 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
6365 break;
6366 case 0x4: /* FMAX */
6367 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6368 break;
6369 case 0x5: /* FMIN */
6370 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6371 break;
6372 case 0x6: /* FMAXNM */
6373 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6374 break;
6375 case 0x7: /* FMINNM */
6376 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6377 break;
6378 case 0x8: /* FNMUL */
6379 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6380 gen_helper_vfp_negd(tcg_res, tcg_res);
6381 break;
6382 }
6383
6384 write_fp_dreg(s, rd, tcg_res);
6385 }
6386
6387 /* Floating-point data-processing (2 source) - half precision */
6388 static void handle_fp_2src_half(DisasContext *s, int opcode,
6389 int rd, int rn, int rm)
6390 {
6391 TCGv_i32 tcg_op1;
6392 TCGv_i32 tcg_op2;
6393 TCGv_i32 tcg_res;
6394 TCGv_ptr fpst;
6395
6396 tcg_res = tcg_temp_new_i32();
6397 fpst = fpstatus_ptr(FPST_FPCR_F16);
6398 tcg_op1 = read_fp_hreg(s, rn);
6399 tcg_op2 = read_fp_hreg(s, rm);
6400
6401 switch (opcode) {
6402 case 0x0: /* FMUL */
6403 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
6404 break;
6405 case 0x1: /* FDIV */
6406 gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
6407 break;
6408 case 0x2: /* FADD */
6409 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
6410 break;
6411 case 0x3: /* FSUB */
6412 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
6413 break;
6414 case 0x4: /* FMAX */
6415 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
6416 break;
6417 case 0x5: /* FMIN */
6418 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
6419 break;
6420 case 0x6: /* FMAXNM */
6421 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6422 break;
6423 case 0x7: /* FMINNM */
6424 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6425 break;
6426 case 0x8: /* FNMUL */
6427 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
6428 tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
6429 break;
6430 default:
6431 g_assert_not_reached();
6432 }
6433
6434 write_fp_sreg(s, rd, tcg_res);
6435 }
6436
6437 /* Floating point data-processing (2 source)
6438 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
6439 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6440 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
6441 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6442 */
6443 static void disas_fp_2src(DisasContext *s, uint32_t insn)
6444 {
6445 int mos = extract32(insn, 29, 3);
6446 int type = extract32(insn, 22, 2);
6447 int rd = extract32(insn, 0, 5);
6448 int rn = extract32(insn, 5, 5);
6449 int rm = extract32(insn, 16, 5);
6450 int opcode = extract32(insn, 12, 4);
6451
6452 if (opcode > 8 || mos) {
6453 unallocated_encoding(s);
6454 return;
6455 }
6456
6457 switch (type) {
6458 case 0:
6459 if (!fp_access_check(s)) {
6460 return;
6461 }
6462 handle_fp_2src_single(s, opcode, rd, rn, rm);
6463 break;
6464 case 1:
6465 if (!fp_access_check(s)) {
6466 return;
6467 }
6468 handle_fp_2src_double(s, opcode, rd, rn, rm);
6469 break;
6470 case 3:
6471 if (!dc_isar_feature(aa64_fp16, s)) {
6472 unallocated_encoding(s);
6473 return;
6474 }
6475 if (!fp_access_check(s)) {
6476 return;
6477 }
6478 handle_fp_2src_half(s, opcode, rd, rn, rm);
6479 break;
6480 default:
6481 unallocated_encoding(s);
6482 }
6483 }
6484
6485 /* Floating-point data-processing (3 source) - single precision */
6486 static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
6487 int rd, int rn, int rm, int ra)
6488 {
6489 TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6490 TCGv_i32 tcg_res = tcg_temp_new_i32();
6491 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6492
6493 tcg_op1 = read_fp_sreg(s, rn);
6494 tcg_op2 = read_fp_sreg(s, rm);
6495 tcg_op3 = read_fp_sreg(s, ra);
6496
6497 /* These are fused multiply-add, and must be done as one
6498 * floating point operation with no rounding between the
6499 * multiplication and addition steps.
6500 * NB that doing the negations here as separate steps is
6501 * correct: an input NaN should come out with its sign bit
6502 * flipped if it is a negated input.
6503 */
6504 if (o1) {
6505 gen_helper_vfp_negs(tcg_op3, tcg_op3);
6506 }
6507
6508 if (o0 != o1) {
6509 gen_helper_vfp_negs(tcg_op1, tcg_op1);
6510 }
6511
6512 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6513
6514 write_fp_sreg(s, rd, tcg_res);
6515 }
6516
6517 /* Floating-point data-processing (3 source) - double precision */
6518 static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
6519 int rd, int rn, int rm, int ra)
6520 {
6521 TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
6522 TCGv_i64 tcg_res = tcg_temp_new_i64();
6523 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6524
6525 tcg_op1 = read_fp_dreg(s, rn);
6526 tcg_op2 = read_fp_dreg(s, rm);
6527 tcg_op3 = read_fp_dreg(s, ra);
6528
6529 /* These are fused multiply-add, and must be done as one
6530 * floating point operation with no rounding between the
6531 * multiplication and addition steps.
6532 * NB that doing the negations here as separate steps is
6533 * correct: an input NaN should come out with its sign bit
6534 * flipped if it is a negated input.
6535 */
6536 if (o1) {
6537 gen_helper_vfp_negd(tcg_op3, tcg_op3);
6538 }
6539
6540 if (o0 != o1) {
6541 gen_helper_vfp_negd(tcg_op1, tcg_op1);
6542 }
6543
6544 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6545
6546 write_fp_dreg(s, rd, tcg_res);
6547 }
6548
6549 /* Floating-point data-processing (3 source) - half precision */
6550 static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
6551 int rd, int rn, int rm, int ra)
6552 {
6553 TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6554 TCGv_i32 tcg_res = tcg_temp_new_i32();
6555 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);
6556
6557 tcg_op1 = read_fp_hreg(s, rn);
6558 tcg_op2 = read_fp_hreg(s, rm);
6559 tcg_op3 = read_fp_hreg(s, ra);
6560
6561 /* These are fused multiply-add, and must be done as one
6562 * floating point operation with no rounding between the
6563 * multiplication and addition steps.
6564 * NB that doing the negations here as separate steps is
6565 * correct: an input NaN should come out with its sign bit
6566 * flipped if it is a negated input.
6567 */
6568 if (o1) {
6569 tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
6570 }
6571
6572 if (o0 != o1) {
6573 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
6574 }
6575
6576 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6577
6578 write_fp_sreg(s, rd, tcg_res);
6579 }
6580
6581 /* Floating point data-processing (3 source)
6582 * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
6583 * +---+---+---+-----------+------+----+------+----+------+------+------+
6584 * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
6585 * +---+---+---+-----------+------+----+------+----+------+------+------+
6586 */
6587 static void disas_fp_3src(DisasContext *s, uint32_t insn)
6588 {
6589 int mos = extract32(insn, 29, 3);
6590 int type = extract32(insn, 22, 2);
6591 int rd = extract32(insn, 0, 5);
6592 int rn = extract32(insn, 5, 5);
6593 int ra = extract32(insn, 10, 5);
6594 int rm = extract32(insn, 16, 5);
6595 bool o0 = extract32(insn, 15, 1);
6596 bool o1 = extract32(insn, 21, 1);
6597
6598 if (mos) {
6599 unallocated_encoding(s);
6600 return;
6601 }
6602
6603 switch (type) {
6604 case 0:
6605 if (!fp_access_check(s)) {
6606 return;
6607 }
6608 handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
6609 break;
6610 case 1:
6611 if (!fp_access_check(s)) {
6612 return;
6613 }
6614 handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
6615 break;
6616 case 3:
6617 if (!dc_isar_feature(aa64_fp16, s)) {
6618 unallocated_encoding(s);
6619 return;
6620 }
6621 if (!fp_access_check(s)) {
6622 return;
6623 }
6624 handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
6625 break;
6626 default:
6627 unallocated_encoding(s);
6628 }
6629 }
6630
6631 /* Floating point immediate
6632 * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
6633 * +---+---+---+-----------+------+---+------------+-------+------+------+
6634 * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
6635 * +---+---+---+-----------+------+---+------------+-------+------+------+
6636 */
6637 static void disas_fp_imm(DisasContext *s, uint32_t insn)
6638 {
6639 int rd = extract32(insn, 0, 5);
6640 int imm5 = extract32(insn, 5, 5);
6641 int imm8 = extract32(insn, 13, 8);
6642 int type = extract32(insn, 22, 2);
6643 int mos = extract32(insn, 29, 3);
6644 uint64_t imm;
6645 MemOp sz;
6646
6647 if (mos || imm5) {
6648 unallocated_encoding(s);
6649 return;
6650 }
6651
6652 switch (type) {
6653 case 0:
6654 sz = MO_32;
6655 break;
6656 case 1:
6657 sz = MO_64;
6658 break;
6659 case 3:
6660 sz = MO_16;
6661 if (dc_isar_feature(aa64_fp16, s)) {
6662 break;
6663 }
6664 /* fallthru */
6665 default:
6666 unallocated_encoding(s);
6667 return;
6668 }
6669
6670 if (!fp_access_check(s)) {
6671 return;
6672 }
6673
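    /*
     * imm8 uses the VFPExpandImm() encoding (sign, 3-bit exponent,
     * 4-bit fraction); for example imm8 == 0x70 expands to 1.0 in the
     * selected precision. write_fp_dreg then zero-extends the value
     * into the rest of the vector register.
     */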
6674 imm = vfp_expand_imm(sz, imm8);
6675 write_fp_dreg(s, rd, tcg_constant_i64(imm));
6676 }
6677
6678 /* Handle floating point <=> fixed point conversions. Note that we can
6679 * also deal with fp <=> integer conversions as a special case (scale == 64).
6680 * OPTME: consider handling that special case specially or at least skipping
6681 * the call to scalbn in the helpers for zero shifts.
6682 */
6683 static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
6684 bool itof, int rmode, int scale, int sf, int type)
6685 {
6686 bool is_signed = !(opcode & 1);
6687 TCGv_ptr tcg_fpstatus;
6688 TCGv_i32 tcg_shift, tcg_single;
6689 TCGv_i64 tcg_double;
6690
6691 tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);
6692
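    /*
     * tcg_shift is the number of fractional bits, i.e. 64 - scale; for
     * the plain integer conversions (scale == 64) it is zero, so the
     * scalbn in the helpers (see the OPTME note above) has no effect.
     */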
6693 tcg_shift = tcg_constant_i32(64 - scale);
6694
6695 if (itof) {
6696 TCGv_i64 tcg_int = cpu_reg(s, rn);
6697 if (!sf) {
6698 TCGv_i64 tcg_extend = tcg_temp_new_i64();
6699
6700 if (is_signed) {
6701 tcg_gen_ext32s_i64(tcg_extend, tcg_int);
6702 } else {
6703 tcg_gen_ext32u_i64(tcg_extend, tcg_int);
6704 }
6705
6706 tcg_int = tcg_extend;
6707 }
6708
6709 switch (type) {
6710 case 1: /* float64 */
6711 tcg_double = tcg_temp_new_i64();
6712 if (is_signed) {
6713 gen_helper_vfp_sqtod(tcg_double, tcg_int,
6714 tcg_shift, tcg_fpstatus);
6715 } else {
6716 gen_helper_vfp_uqtod(tcg_double, tcg_int,
6717 tcg_shift, tcg_fpstatus);
6718 }
6719 write_fp_dreg(s, rd, tcg_double);
6720 break;
6721
6722 case 0: /* float32 */
6723 tcg_single = tcg_temp_new_i32();
6724 if (is_signed) {
6725 gen_helper_vfp_sqtos(tcg_single, tcg_int,
6726 tcg_shift, tcg_fpstatus);
6727 } else {
6728 gen_helper_vfp_uqtos(tcg_single, tcg_int,
6729 tcg_shift, tcg_fpstatus);
6730 }
6731 write_fp_sreg(s, rd, tcg_single);
6732 break;
6733
6734 case 3: /* float16 */
6735 tcg_single = tcg_temp_new_i32();
6736 if (is_signed) {
6737 gen_helper_vfp_sqtoh(tcg_single, tcg_int,
6738 tcg_shift, tcg_fpstatus);
6739 } else {
6740 gen_helper_vfp_uqtoh(tcg_single, tcg_int,
6741 tcg_shift, tcg_fpstatus);
6742 }
6743 write_fp_sreg(s, rd, tcg_single);
6744 break;
6745
6746 default:
6747 g_assert_not_reached();
6748 }
6749 } else {
6750 TCGv_i64 tcg_int = cpu_reg(s, rd);
6751 TCGv_i32 tcg_rmode;
6752
6753 if (extract32(opcode, 2, 1)) {
6754 /* There are too many rounding modes to all fit into rmode,
6755 * so FCVTA[US] is a special case.
6756 */
6757 rmode = FPROUNDING_TIEAWAY;
6758 }
6759
6760 tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
6761
6762 switch (type) {
6763 case 1: /* float64 */
6764 tcg_double = read_fp_dreg(s, rn);
6765 if (is_signed) {
6766 if (!sf) {
6767 gen_helper_vfp_tosld(tcg_int, tcg_double,
6768 tcg_shift, tcg_fpstatus);
6769 } else {
6770 gen_helper_vfp_tosqd(tcg_int, tcg_double,
6771 tcg_shift, tcg_fpstatus);
6772 }
6773 } else {
6774 if (!sf) {
6775 gen_helper_vfp_tould(tcg_int, tcg_double,
6776 tcg_shift, tcg_fpstatus);
6777 } else {
6778 gen_helper_vfp_touqd(tcg_int, tcg_double,
6779 tcg_shift, tcg_fpstatus);
6780 }
6781 }
6782 if (!sf) {
6783 tcg_gen_ext32u_i64(tcg_int, tcg_int);
6784 }
6785 break;
6786
6787 case 0: /* float32 */
6788 tcg_single = read_fp_sreg(s, rn);
6789 if (sf) {
6790 if (is_signed) {
6791 gen_helper_vfp_tosqs(tcg_int, tcg_single,
6792 tcg_shift, tcg_fpstatus);
6793 } else {
6794 gen_helper_vfp_touqs(tcg_int, tcg_single,
6795 tcg_shift, tcg_fpstatus);
6796 }
6797 } else {
6798 TCGv_i32 tcg_dest = tcg_temp_new_i32();
6799 if (is_signed) {
6800 gen_helper_vfp_tosls(tcg_dest, tcg_single,
6801 tcg_shift, tcg_fpstatus);
6802 } else {
6803 gen_helper_vfp_touls(tcg_dest, tcg_single,
6804 tcg_shift, tcg_fpstatus);
6805 }
6806 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
6807 }
6808 break;
6809
6810 case 3: /* float16 */
6811 tcg_single = read_fp_sreg(s, rn);
6812 if (sf) {
6813 if (is_signed) {
6814 gen_helper_vfp_tosqh(tcg_int, tcg_single,
6815 tcg_shift, tcg_fpstatus);
6816 } else {
6817 gen_helper_vfp_touqh(tcg_int, tcg_single,
6818 tcg_shift, tcg_fpstatus);
6819 }
6820 } else {
6821 TCGv_i32 tcg_dest = tcg_temp_new_i32();
6822 if (is_signed) {
6823 gen_helper_vfp_toslh(tcg_dest, tcg_single,
6824 tcg_shift, tcg_fpstatus);
6825 } else {
6826 gen_helper_vfp_toulh(tcg_dest, tcg_single,
6827 tcg_shift, tcg_fpstatus);
6828 }
6829 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
6830 }
6831 break;
6832
6833 default:
6834 g_assert_not_reached();
6835 }
6836
6837 gen_restore_rmode(tcg_rmode, tcg_fpstatus);
6838 }
6839 }
6840
6841 /* Floating point <-> fixed point conversions
6842 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
6843 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
6844 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
6845 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
6846 */
6847 static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
6848 {
6849 int rd = extract32(insn, 0, 5);
6850 int rn = extract32(insn, 5, 5);
6851 int scale = extract32(insn, 10, 6);
6852 int opcode = extract32(insn, 16, 3);
6853 int rmode = extract32(insn, 19, 2);
6854 int type = extract32(insn, 22, 2);
6855 bool sbit = extract32(insn, 29, 1);
6856 bool sf = extract32(insn, 31, 1);
6857 bool itof;
6858
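    /*
     * For the 32-bit (sf == 0) forms scale must be >= 32: the number of
     * fractional bits is 64 - scale, which may not exceed the 32-bit
     * register width.
     */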
6859 if (sbit || (!sf && scale < 32)) {
6860 unallocated_encoding(s);
6861 return;
6862 }
6863
6864 switch (type) {
6865 case 0: /* float32 */
6866 case 1: /* float64 */
6867 break;
6868 case 3: /* float16 */
6869 if (dc_isar_feature(aa64_fp16, s)) {
6870 break;
6871 }
6872 /* fallthru */
6873 default:
6874 unallocated_encoding(s);
6875 return;
6876 }
6877
6878 switch ((rmode << 3) | opcode) {
6879 case 0x2: /* SCVTF */
6880 case 0x3: /* UCVTF */
6881 itof = true;
6882 break;
6883 case 0x18: /* FCVTZS */
6884 case 0x19: /* FCVTZU */
6885 itof = false;
6886 break;
6887 default:
6888 unallocated_encoding(s);
6889 return;
6890 }
6891
6892 if (!fp_access_check(s)) {
6893 return;
6894 }
6895
6896 handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
6897 }
6898
6899 static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
6900 {
6901 /* FMOV: gpr to or from float, double, or top half of quad fp reg,
6902 * without conversion.
6903 */
6904
6905 if (itof) {
6906 TCGv_i64 tcg_rn = cpu_reg(s, rn);
6907 TCGv_i64 tmp;
6908
6909 switch (type) {
6910 case 0:
6911 /* 32 bit */
6912 tmp = tcg_temp_new_i64();
6913 tcg_gen_ext32u_i64(tmp, tcg_rn);
6914 write_fp_dreg(s, rd, tmp);
6915 break;
6916 case 1:
6917 /* 64 bit */
6918 write_fp_dreg(s, rd, tcg_rn);
6919 break;
6920 case 2:
6921 /* 64 bit to top half. */
6922 tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
6923 clear_vec_high(s, true, rd);
6924 break;
6925 case 3:
6926 /* 16 bit */
6927 tmp = tcg_temp_new_i64();
6928 tcg_gen_ext16u_i64(tmp, tcg_rn);
6929 write_fp_dreg(s, rd, tmp);
6930 break;
6931 default:
6932 g_assert_not_reached();
6933 }
6934 } else {
6935 TCGv_i64 tcg_rd = cpu_reg(s, rd);
6936
6937 switch (type) {
6938 case 0:
6939 /* 32 bit */
6940 tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
6941 break;
6942 case 1:
6943 /* 64 bit */
6944 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
6945 break;
6946 case 2:
6947 /* 64 bits from top half */
6948 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
6949 break;
6950 case 3:
6951 /* 16 bit */
6952 tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
6953 break;
6954 default:
6955 g_assert_not_reached();
6956 }
6957 }
6958 }
6959
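/*
 * FJCVTZS (FEAT_JSCVT): the helper packs the 32-bit result into the low
 * half of its return value and the new Z flag into the high half; Z is
 * set when the conversion was exact, and N, C and V are cleared.
 */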
6960 static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
6961 {
6962 TCGv_i64 t = read_fp_dreg(s, rn);
6963 TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);
6964
6965 gen_helper_fjcvtzs(t, t, fpstatus);
6966
6967 tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
6968 tcg_gen_extrh_i64_i32(cpu_ZF, t);
6969 tcg_gen_movi_i32(cpu_CF, 0);
6970 tcg_gen_movi_i32(cpu_NF, 0);
6971 tcg_gen_movi_i32(cpu_VF, 0);
6972 }
6973
6974 /* Floating point <-> integer conversions
6975 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
6976 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
6977 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
6978 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
6979 */
6980 static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
6981 {
6982 int rd = extract32(insn, 0, 5);
6983 int rn = extract32(insn, 5, 5);
6984 int opcode = extract32(insn, 16, 3);
6985 int rmode = extract32(insn, 19, 2);
6986 int type = extract32(insn, 22, 2);
6987 bool sbit = extract32(insn, 29, 1);
6988 bool sf = extract32(insn, 31, 1);
6989 bool itof = false;
6990
6991 if (sbit) {
6992 goto do_unallocated;
6993 }
6994
6995 switch (opcode) {
6996 case 2: /* SCVTF */
6997 case 3: /* UCVTF */
6998 itof = true;
6999 /* fallthru */
7000 case 4: /* FCVTAS */
7001 case 5: /* FCVTAU */
7002 if (rmode != 0) {
7003 goto do_unallocated;
7004 }
7005 /* fallthru */
7006 case 0: /* FCVT[NPMZ]S */
7007 case 1: /* FCVT[NPMZ]U */
7008 switch (type) {
7009 case 0: /* float32 */
7010 case 1: /* float64 */
7011 break;
7012 case 3: /* float16 */
7013 if (!dc_isar_feature(aa64_fp16, s)) {
7014 goto do_unallocated;
7015 }
7016 break;
7017 default:
7018 goto do_unallocated;
7019 }
7020 if (!fp_access_check(s)) {
7021 return;
7022 }
7023 handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
7024 break;
7025
7026 default:
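        /*
         * The remaining encodings are dispatched on an 8-bit key packed
         * as sf:type:rmode:opcode (1 + 2 + 2 + 3 bits), matching the
         * binary case labels below.
         */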
7027 switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
7028 case 0b01100110: /* FMOV half <-> 32-bit int */
7029 case 0b01100111:
7030 case 0b11100110: /* FMOV half <-> 64-bit int */
7031 case 0b11100111:
7032 if (!dc_isar_feature(aa64_fp16, s)) {
7033 goto do_unallocated;
7034 }
7035 /* fallthru */
7036 case 0b00000110: /* FMOV 32-bit */
7037 case 0b00000111:
7038 case 0b10100110: /* FMOV 64-bit */
7039 case 0b10100111:
7040 case 0b11001110: /* FMOV top half of 128-bit */
7041 case 0b11001111:
7042 if (!fp_access_check(s)) {
7043 return;
7044 }
7045 itof = opcode & 1;
7046 handle_fmov(s, rd, rn, type, itof);
7047 break;
7048
7049 case 0b00111110: /* FJCVTZS */
7050 if (!dc_isar_feature(aa64_jscvt, s)) {
7051 goto do_unallocated;
7052 } else if (fp_access_check(s)) {
7053 handle_fjcvtzs(s, rd, rn);
7054 }
7055 break;
7056
7057 default:
7058 do_unallocated:
7059 unallocated_encoding(s);
7060 return;
7061 }
7062 break;
7063 }
7064 }
7065
7066 /* FP-specific subcases of table C3-6 (SIMD and FP data processing)
7067 * 31 30 29 28 25 24 0
7068 * +---+---+---+---------+-----------------------------+
7069 * | | 0 | | 1 1 1 1 | |
7070 * +---+---+---+---------+-----------------------------+
7071 */
7072 static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
7073 {
7074 if (extract32(insn, 24, 1)) {
7075 /* Floating point data-processing (3 source) */
7076 disas_fp_3src(s, insn);
7077 } else if (extract32(insn, 21, 1) == 0) {
7078 /* Floating point to fixed point conversions */
7079 disas_fp_fixed_conv(s, insn);
7080 } else {
7081 switch (extract32(insn, 10, 2)) {
7082 case 1:
7083 /* Floating point conditional compare */
7084 disas_fp_ccomp(s, insn);
7085 break;
7086 case 2:
7087 /* Floating point data-processing (2 source) */
7088 disas_fp_2src(s, insn);
7089 break;
7090 case 3:
7091 /* Floating point conditional select */
7092 disas_fp_csel(s, insn);
7093 break;
7094 case 0:
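            /*
             * Dispatch on the position of the lowest set bit of
             * insn[15:12]; ctz32(0) returns 32, so an all-zeros field
             * falls through to the default (FP <-> integer) case.
             */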
7095 switch (ctz32(extract32(insn, 12, 4))) {
7096 case 0: /* [15:12] == xxx1 */
7097 /* Floating point immediate */
7098 disas_fp_imm(s, insn);
7099 break;
7100 case 1: /* [15:12] == xx10 */
7101 /* Floating point compare */
7102 disas_fp_compare(s, insn);
7103 break;
7104 case 2: /* [15:12] == x100 */
7105 /* Floating point data-processing (1 source) */
7106 disas_fp_1src(s, insn);
7107 break;
7108 case 3: /* [15:12] == 1000 */
7109 unallocated_encoding(s);
7110 break;
7111 default: /* [15:12] == 0000 */
7112 /* Floating point <-> integer conversions */
7113 disas_fp_int_conv(s, insn);
7114 break;
7115 }
7116 break;
7117 }
7118 }
7119 }
7120
7121 static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
7122 int pos)
7123 {
7124 /* Extract 64 bits from the middle of two concatenated 64 bit
7125 * vector register slices left:right. The extracted bits start
7126 * at 'pos' bits into the right (least significant) side.
7127 * We return the result in tcg_right, and guarantee not to
7128 * trash tcg_left.
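 * For example, with pos == 24 the result is (right >> 24) | (left << 40),
 * i.e. bytes 3..10 of the 16-byte left:right concatenation.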
7129 */
7130 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
7131 assert(pos > 0 && pos < 64);
7132
7133 tcg_gen_shri_i64(tcg_right, tcg_right, pos);
7134 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
7135 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
7136 }
7137
7138 /* EXT
7139 * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0
7140 * +---+---+-------------+-----+---+------+---+------+---+------+------+
7141 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd |
7142 * +---+---+-------------+-----+---+------+---+------+---+------+------+
7143 */
7144 static void disas_simd_ext(DisasContext *s, uint32_t insn)
7145 {
7146 int is_q = extract32(insn, 30, 1);
7147 int op2 = extract32(insn, 22, 2);
7148 int imm4 = extract32(insn, 11, 4);
7149 int rm = extract32(insn, 16, 5);
7150 int rn = extract32(insn, 5, 5);
7151 int rd = extract32(insn, 0, 5);
7152 int pos = imm4 << 3;
7153 TCGv_i64 tcg_resl, tcg_resh;
7154
7155 if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
7156 unallocated_encoding(s);
7157 return;
7158 }
7159
7160 if (!fp_access_check(s)) {
7161 return;
7162 }
7163
7164 tcg_resh = tcg_temp_new_i64();
7165 tcg_resl = tcg_temp_new_i64();
7166
7167 /* Vd gets bits starting at pos bits into Vm:Vn. This is
7168 * either extracting 128 bits from a 128:128 concatenation, or
7169 * extracting 64 bits from a 64:64 concatenation.
7170 */
7171 if (!is_q) {
7172 read_vec_element(s, tcg_resl, rn, 0, MO_64);
7173 if (pos != 0) {
7174 read_vec_element(s, tcg_resh, rm, 0, MO_64);
7175 do_ext64(s, tcg_resh, tcg_resl, pos);
7176 }
7177 } else {
7178 TCGv_i64 tcg_hh;
7179 typedef struct {
7180 int reg;
7181 int elt;
7182 } EltPosns;
7183 EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
7184 EltPosns *elt = eltposns;
7185
7186 if (pos >= 64) {
7187 elt++;
7188 pos -= 64;
7189 }
7190
7191 read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
7192 elt++;
7193 read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
7194 elt++;
7195 if (pos != 0) {
7196 do_ext64(s, tcg_resh, tcg_resl, pos);
7197 tcg_hh = tcg_temp_new_i64();
7198 read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
7199 do_ext64(s, tcg_hh, tcg_resh, pos);
7200 }
7201 }
7202
7203 write_vec_element(s, tcg_resl, rd, 0, MO_64);
7204 if (is_q) {
7205 write_vec_element(s, tcg_resh, rd, 1, MO_64);
7206 }
7207 clear_vec_high(s, is_q, rd);
7208 }
7209
7210 /* TBL/TBX
7211 * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0
7212 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
7213 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd |
7214 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
7215 */
7216 static void disas_simd_tb(DisasContext *s, uint32_t insn)
7217 {
7218 int op2 = extract32(insn, 22, 2);
7219 int is_q = extract32(insn, 30, 1);
7220 int rm = extract32(insn, 16, 5);
7221 int rn = extract32(insn, 5, 5);
7222 int rd = extract32(insn, 0, 5);
7223 int is_tbx = extract32(insn, 12, 1);
7224 int len = (extract32(insn, 13, 2) + 1) * 16;
7225
7226 if (op2 != 0) {
7227 unallocated_encoding(s);
7228 return;
7229 }
7230
7231 if (!fp_access_check(s)) {
7232 return;
7233 }
7234
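    /*
     * The gvec 'data' immediate packs the table length in bytes, the
     * TBX flag and the first table register index. For TBX an
     * out-of-range index leaves the destination element unchanged,
     * whereas TBL writes zero.
     */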
7235 tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
7236 vec_full_reg_offset(s, rm), cpu_env,
7237 is_q ? 16 : 8, vec_full_reg_size(s),
7238 (len << 6) | (is_tbx << 5) | rn,
7239 gen_helper_simd_tblx);
7240 }
7241
7242 /* ZIP/UZP/TRN
7243 * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
7244 * +---+---+-------------+------+---+------+---+------------------+------+
7245 * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd |
7246 * +---+---+-------------+------+---+------+---+------------------+------+
7247 */
7248 static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
7249 {
7250 int rd = extract32(insn, 0, 5);
7251 int rn = extract32(insn, 5, 5);
7252 int rm = extract32(insn, 16, 5);
7253 int size = extract32(insn, 22, 2);
7254 /* opc field bits [1:0] indicate ZIP/UZP/TRN;
7255 * bit 2 indicates 1 vs 2 variant of the insn.
7256 */
7257 int opcode = extract32(insn, 12, 2);
7258 bool part = extract32(insn, 14, 1);
7259 bool is_q = extract32(insn, 30, 1);
7260 int esize = 8 << size;
7261 int i;
7262 int datasize = is_q ? 128 : 64;
7263 int elements = datasize / esize;
7264 TCGv_i64 tcg_res[2], tcg_ele;
7265
7266 if (opcode == 0 || (size == 3 && !is_q)) {
7267 unallocated_encoding(s);
7268 return;
7269 }
7270
7271 if (!fp_access_check(s)) {
7272 return;
7273 }
7274
7275 tcg_res[0] = tcg_temp_new_i64();
7276 tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL;
7277 tcg_ele = tcg_temp_new_i64();
7278
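    /*
     * Build the result 64 bits at a time: for each output element, w
     * selects the destination double-word and o the bit offset within
     * it, and the chosen source element is shifted and OR-ed into place.
     */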
7279 for (i = 0; i < elements; i++) {
7280 int o, w;
7281
7282 switch (opcode) {
7283 case 1: /* UZP1/2 */
7284 {
7285 int midpoint = elements / 2;
7286 if (i < midpoint) {
7287 read_vec_element(s, tcg_ele, rn, 2 * i + part, size);
7288 } else {
7289 read_vec_element(s, tcg_ele, rm,
7290 2 * (i - midpoint) + part, size);
7291 }
7292 break;
7293 }
7294 case 2: /* TRN1/2 */
7295 if (i & 1) {
7296 read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size);
7297 } else {
7298 read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size);
7299 }
7300 break;
7301 case 3: /* ZIP1/2 */
7302 {
7303 int base = part * elements / 2;
7304 if (i & 1) {
7305 read_vec_element(s, tcg_ele, rm, base + (i >> 1), size);
7306 } else {
7307 read_vec_element(s, tcg_ele, rn, base + (i >> 1), size);
7308 }
7309 break;
7310 }
7311 default:
7312 g_assert_not_reached();
7313 }
7314
7315 w = (i * esize) / 64;
7316 o = (i * esize) % 64;
7317 if (o == 0) {
7318 tcg_gen_mov_i64(tcg_res[w], tcg_ele);
7319 } else {
7320 tcg_gen_shli_i64(tcg_ele, tcg_ele, o);
7321 tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele);
7322 }
7323 }
7324
7325 for (i = 0; i <= is_q; ++i) {
7326 write_vec_element(s, tcg_res[i], rd, i, MO_64);
7327 }
7328 clear_vec_high(s, is_q, rd);
7329 }
7330
7331 /*
7332 * do_reduction_op helper
7333 *
7334 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
7335 * important for correct NaN propagation that we do these
7336 * operations in exactly the order specified by the pseudocode.
7337 *
7338 * This is a recursive function; TCG temps should be freed by the
7339 * calling function once it is done with the values.
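 * The vmap argument is a bitmap of the input elements still to be
 * reduced; each recursion step splits it into a low and a high half so
 * that the combining order matches the pseudocode exactly.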
7340 */
7341 static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
7342 int esize, int size, int vmap, TCGv_ptr fpst)
7343 {
7344 if (esize == size) {
7345 int element;
7346 MemOp msize = esize == 16 ? MO_16 : MO_32;
7347 TCGv_i32 tcg_elem;
7348
7349 /* We should have one register left here */
7350 assert(ctpop8(vmap) == 1);
7351 element = ctz32(vmap);
7352 assert(element < 8);
7353
7354 tcg_elem = tcg_temp_new_i32();
7355 read_vec_element_i32(s, tcg_elem, rn, element, msize);
7356 return tcg_elem;
7357 } else {
7358 int bits = size / 2;
7359 int shift = ctpop8(vmap) / 2;
7360 int vmap_lo = (vmap >> shift) & vmap;
7361 int vmap_hi = (vmap & ~vmap_lo);
7362 TCGv_i32 tcg_hi, tcg_lo, tcg_res;
7363
7364 tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
7365 tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
7366 tcg_res = tcg_temp_new_i32();
7367
7368 switch (fpopcode) {
7369 case 0x0c: /* fmaxnmv half-precision */
7370 gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
7371 break;
7372 case 0x0f: /* fmaxv half-precision */
7373 gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
7374 break;
7375 case 0x1c: /* fminnmv half-precision */
7376 gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
7377 break;
7378 case 0x1f: /* fminv half-precision */
7379 gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
7380 break;
7381 case 0x2c: /* fmaxnmv */
7382 gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
7383 break;
7384 case 0x2f: /* fmaxv */
7385 gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
7386 break;
7387 case 0x3c: /* fminnmv */
7388 gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
7389 break;
7390 case 0x3f: /* fminv */
7391 gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
7392 break;
7393 default:
7394 g_assert_not_reached();
7395 }
7396 return tcg_res;
7397 }
7398 }
7399
7400 /* AdvSIMD across lanes
7401 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
7402 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
7403 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
7404 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
7405 */
7406 static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
7407 {
7408 int rd = extract32(insn, 0, 5);
7409 int rn = extract32(insn, 5, 5);
7410 int size = extract32(insn, 22, 2);
7411 int opcode = extract32(insn, 12, 5);
7412 bool is_q = extract32(insn, 30, 1);
7413 bool is_u = extract32(insn, 29, 1);
7414 bool is_fp = false;
7415 bool is_min = false;
7416 int esize;
7417 int elements;
7418 int i;
7419 TCGv_i64 tcg_res, tcg_elt;
7420
7421 switch (opcode) {
7422 case 0x1b: /* ADDV */
7423 if (is_u) {
7424 unallocated_encoding(s);
7425 return;
7426 }
7427 /* fall through */
7428 case 0x3: /* SADDLV, UADDLV */
7429 case 0xa: /* SMAXV, UMAXV */
7430 case 0x1a: /* SMINV, UMINV */
7431 if (size == 3 || (size == 2 && !is_q)) {
7432 unallocated_encoding(s);
7433 return;
7434 }
7435 break;
7436 case 0xc: /* FMAXNMV, FMINNMV */
7437 case 0xf: /* FMAXV, FMINV */
7438 /* Bit 1 of size field encodes min vs max and the actual size
7439 * depends on the encoding of the U bit. If U is not set (and FP16
7440 * is enabled) then we do half-precision float instead of single
7441 * precision.
7442 */
7443 is_min = extract32(size, 1, 1);
7444 is_fp = true;
7445 if (!is_u && dc_isar_feature(aa64_fp16, s)) {
7446 size = 1;
7447 } else if (!is_u || !is_q || extract32(size, 0, 1)) {
7448 unallocated_encoding(s);
7449 return;
7450 } else {
7451 size = 2;
7452 }
7453 break;
7454 default:
7455 unallocated_encoding(s);
7456 return;
7457 }
7458
7459 if (!fp_access_check(s)) {
7460 return;
7461 }
7462
7463 esize = 8 << size;
7464 elements = (is_q ? 128 : 64) / esize;
7465
7466 tcg_res = tcg_temp_new_i64();
7467 tcg_elt = tcg_temp_new_i64();
7468
7469 /* These instructions operate across all lanes of a vector
7470 * to produce a single result. We can guarantee that a 64
7471 * bit intermediate is sufficient:
7472 * + for [US]ADDLV the maximum element size is 32 bits, and
7473 * the result type is 64 bits
7474 * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
7475 * same as the element size, which is 32 bits at most
7476 * For the integer operations we can choose to work at 64
7477 * or 32 bits and truncate at the end; for simplicity
7478 * we use 64 bits always. The floating point
7479 * ops do require 32 bit intermediates, though.
7480 */
7481 if (!is_fp) {
7482 read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
7483
7484 for (i = 1; i < elements; i++) {
7485 read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
7486
7487 switch (opcode) {
7488 case 0x03: /* SADDLV / UADDLV */
7489 case 0x1b: /* ADDV */
7490 tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
7491 break;
7492 case 0x0a: /* SMAXV / UMAXV */
7493 if (is_u) {
7494 tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
7495 } else {
7496 tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
7497 }
7498 break;
7499 case 0x1a: /* SMINV / UMINV */
7500 if (is_u) {
7501 tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
7502 } else {
7503 tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
7504 }
7505 break;
7506 default:
7507 g_assert_not_reached();
7508 }
7509
7510 }
7511 } else {
7512 /* Floating point vector reduction ops which work across 32
7513 * bit (single) or 16 bit (half-precision) intermediates.
7514 * Note that correct NaN propagation requires that we do these
7515 * operations in exactly the order specified by the pseudocode.
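 * The fpopcode value built below (opcode | is_min << 4 | is_u << 5)
 * selects between the half- and single-precision min/max helpers in
 * do_reduction_op.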
7516 */
7517 TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
7518 int fpopcode = opcode | is_min << 4 | is_u << 5;
7519 int vmap = (1 << elements) - 1;
7520 TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
7521 (is_q ? 128 : 64), vmap, fpst);
7522 tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
7523 }
7524
7525 /* Now truncate the result to the width required for the final output */
7526 if (opcode == 0x03) {
7527 /* SADDLV, UADDLV: result is 2*esize */
7528 size++;
7529 }
7530
7531 switch (size) {
7532 case 0:
7533 tcg_gen_ext8u_i64(tcg_res, tcg_res);
7534 break;
7535 case 1:
7536 tcg_gen_ext16u_i64(tcg_res, tcg_res);
7537 break;
7538 case 2:
7539 tcg_gen_ext32u_i64(tcg_res, tcg_res);
7540 break;
7541 case 3:
7542 break;
7543 default:
7544 g_assert_not_reached();
7545 }
7546
7547 write_fp_dreg(s, rd, tcg_res);
7548 }
7549
7550 /* DUP (Element, Vector)
7551 *
7552 * 31 30 29 21 20 16 15 10 9 5 4 0
7553 * +---+---+-------------------+--------+-------------+------+------+
7554 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
7555 * +---+---+-------------------+--------+-------------+------+------+
7556 *
7557 * size: encoded in imm5 (see ARM ARM LowestSetBit())
7558 */
7559 static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
7560 int imm5)
7561 {
7562 int size = ctz32(imm5);
7563 int index;
7564
7565 if (size > 3 || (size == 3 && !is_q)) {
7566 unallocated_encoding(s);
7567 return;
7568 }
7569
7570 if (!fp_access_check(s)) {
7571 return;
7572 }
7573
7574 index = imm5 >> (size + 1);
7575 tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
7576 vec_reg_offset(s, rn, index, size),
7577 is_q ? 16 : 8, vec_full_reg_size(s));
7578 }
7579
7580 /* DUP (element, scalar)
7581 * 31 21 20 16 15 10 9 5 4 0
7582 * +-----------------------+--------+-------------+------+------+
7583 * | 0 1 0 1 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
7584 * +-----------------------+--------+-------------+------+------+
7585 */
7586 static void handle_simd_dupes(DisasContext *s, int rd, int rn,
7587 int imm5)
7588 {
7589 int size = ctz32(imm5);
7590 int index;
7591 TCGv_i64 tmp;
7592
7593 if (size > 3) {
7594 unallocated_encoding(s);
7595 return;
7596 }
7597
7598 if (!fp_access_check(s)) {
7599 return;
7600 }
7601
7602 index = imm5 >> (size + 1);
7603
7604 /* This instruction just extracts the specified element and
7605 * zero-extends it into the bottom of the destination register.
7606 */
7607 tmp = tcg_temp_new_i64();
7608 read_vec_element(s, tmp, rn, index, size);
7609 write_fp_dreg(s, rd, tmp);
7610 }
7611
7612 /* DUP (General)
7613 *
7614 * 31 30 29 21 20 16 15 10 9 5 4 0
7615 * +---+---+-------------------+--------+-------------+------+------+
7616 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 1 1 | Rn | Rd |
7617 * +---+---+-------------------+--------+-------------+------+------+
7618 *
7619 * size: encoded in imm5 (see ARM ARM LowestSetBit())
7620 */
7621 static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
7622 int imm5)
7623 {
7624 int size = ctz32(imm5);
7625 uint32_t dofs, oprsz, maxsz;
7626
7627 if (size > 3 || ((size == 3) && !is_q)) {
7628 unallocated_encoding(s);
7629 return;
7630 }
7631
7632 if (!fp_access_check(s)) {
7633 return;
7634 }
7635
7636 dofs = vec_full_reg_offset(s, rd);
7637 oprsz = is_q ? 16 : 8;
7638 maxsz = vec_full_reg_size(s);
7639
7640 tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
7641 }
7642
7643 /* INS (Element)
7644 *
7645 * 31 21 20 16 15 14 11 10 9 5 4 0
7646 * +-----------------------+--------+------------+---+------+------+
7647 * | 0 1 1 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
7648 * +-----------------------+--------+------------+---+------+------+
7649 *
7650 * size: encoded in imm5 (see ARM ARM LowestSetBit())
7651 * index: encoded in imm5<4:size+1>
7652 */
7653 static void handle_simd_inse(DisasContext *s, int rd, int rn,
7654 int imm4, int imm5)
7655 {
7656 int size = ctz32(imm5);
7657 int src_index, dst_index;
7658 TCGv_i64 tmp;
7659
7660 if (size > 3) {
7661 unallocated_encoding(s);
7662 return;
7663 }
7664
7665 if (!fp_access_check(s)) {
7666 return;
7667 }
7668
7669 dst_index = extract32(imm5, 1+size, 5);
7670 src_index = extract32(imm4, size, 4);
7671
7672 tmp = tcg_temp_new_i64();
7673
7674 read_vec_element(s, tmp, rn, src_index, size);
7675 write_vec_element(s, tmp, rd, dst_index, size);
7676
7677 /* INS is considered a 128-bit write for SVE. */
7678 clear_vec_high(s, true, rd);
7679 }
7680
7681
7682 /* INS (General)
7683 *
7684 * 31 21 20 16 15 10 9 5 4 0
7685 * +-----------------------+--------+-------------+------+------+
7686 * | 0 1 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 1 1 1 | Rn | Rd |
7687 * +-----------------------+--------+-------------+------+------+
7688 *
7689 * size: encoded in imm5 (see ARM ARM LowestSetBit())
7690 * index: encoded in imm5<4:size+1>
7691 */
7692 static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
7693 {
7694 int size = ctz32(imm5);
7695 int idx;
7696
7697 if (size > 3) {
7698 unallocated_encoding(s);
7699 return;
7700 }
7701
7702 if (!fp_access_check(s)) {
7703 return;
7704 }
7705
7706 idx = extract32(imm5, 1 + size, 4 - size);
7707 write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
7708
7709 /* INS is considered a 128-bit write for SVE. */
7710 clear_vec_high(s, true, rd);
7711 }
7712
7713 /*
7714 * UMOV (General)
7715 * SMOV (General)
7716 *
7717 * 31 30 29 21 20 16 15 12 10 9 5 4 0
7718 * +---+---+-------------------+--------+-------------+------+------+
7719 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 1 U 1 1 | Rn | Rd |
7720 * +---+---+-------------------+--------+-------------+------+------+
7721 *
7722 * U: unsigned when set
7723 * size: encoded in imm5 (see ARM ARM LowestSetBit())
7724 */
7725 static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
7726 int rn, int rd, int imm5)
7727 {
7728 int size = ctz32(imm5);
7729 int element;
7730 TCGv_i64 tcg_rd;
7731
7732 /* Check for UnallocatedEncodings */
7733 if (is_signed) {
7734 if (size > 2 || (size == 2 && !is_q)) {
7735 unallocated_encoding(s);
7736 return;
7737 }
7738 } else {
7739 if (size > 3
7740 || (size < 3 && is_q)
7741 || (size == 3 && !is_q)) {
7742 unallocated_encoding(s);
7743 return;
7744 }
7745 }
7746
7747 if (!fp_access_check(s)) {
7748 return;
7749 }
7750
7751 element = extract32(imm5, 1+size, 4);
7752
7753 tcg_rd = cpu_reg(s, rd);
7754 read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
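    /*
     * For SMOV to a W register the element was sign-extended to 64 bits
     * above, so it must now be zero-extended again to match the usual
     * "writes to Wd clear the high half" behaviour.
     */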
7755 if (is_signed && !is_q) {
7756 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
7757 }
7758 }
7759
7760 /* AdvSIMD copy
7761 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
7762 * +---+---+----+-----------------+------+---+------+---+------+------+
7763 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
7764 * +---+---+----+-----------------+------+---+------+---+------+------+
7765 */
7766 static void disas_simd_copy(DisasContext *s, uint32_t insn)
7767 {
7768 int rd = extract32(insn, 0, 5);
7769 int rn = extract32(insn, 5, 5);
7770 int imm4 = extract32(insn, 11, 4);
7771 int op = extract32(insn, 29, 1);
7772 int is_q = extract32(insn, 30, 1);
7773 int imm5 = extract32(insn, 16, 5);
7774
7775 if (op) {
7776 if (is_q) {
7777 /* INS (element) */
7778 handle_simd_inse(s, rd, rn, imm4, imm5);
7779 } else {
7780 unallocated_encoding(s);
7781 }
7782 } else {
7783 switch (imm4) {
7784 case 0:
7785 /* DUP (element - vector) */
7786 handle_simd_dupe(s, is_q, rd, rn, imm5);
7787 break;
7788 case 1:
7789 /* DUP (general) */
7790 handle_simd_dupg(s, is_q, rd, rn, imm5);
7791 break;
7792 case 3:
7793 if (is_q) {
7794 /* INS (general) */
7795 handle_simd_insg(s, rd, rn, imm5);
7796 } else {
7797 unallocated_encoding(s);
7798 }
7799 break;
7800 case 5:
7801 case 7:
7802 /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
7803 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
7804 break;
7805 default:
7806 unallocated_encoding(s);
7807 break;
7808 }
7809 }
7810 }
7811
7812 /* AdvSIMD modified immediate
7813 * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0
7814 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
7815 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd |
7816 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
7817 *
7818 * There are a number of operations that can be carried out here:
7819 * MOVI - move (shifted) imm into register
7820 * MVNI - move inverted (shifted) imm into register
7821 * ORR - bitwise OR of (shifted) imm with register
7822 * BIC - bitwise clear of (shifted) imm with register
7823 * With ARMv8.2 we also have:
7824 * FMOV half-precision
7825 */
7826 static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
7827 {
7828 int rd = extract32(insn, 0, 5);
7829 int cmode = extract32(insn, 12, 4);
7830 int o2 = extract32(insn, 11, 1);
7831 uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
7832 bool is_neg = extract32(insn, 29, 1);
7833 bool is_q = extract32(insn, 30, 1);
7834 uint64_t imm = 0;
7835
7836 if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
7837 /* Check for FMOV (vector, immediate) - half-precision */
7838 if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
7839 unallocated_encoding(s);
7840 return;
7841 }
7842 }
7843
7844 if (!fp_access_check(s)) {
7845 return;
7846 }
7847
7848 if (cmode == 15 && o2 && !is_neg) {
7849 /* FMOV (vector, immediate) - half-precision */
7850 imm = vfp_expand_imm(MO_16, abcdefgh);
7851 /* now duplicate across the lanes */
7852 imm = dup_const(MO_16, imm);
7853 } else {
7854 imm = asimd_imm_const(abcdefgh, cmode, is_neg);
7855 }
7856
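    /*
     * cmode values 0xx1 (32-bit shifted) and 10x1 (16-bit shifted) are
     * the ORR/BIC forms, which read-modify-write Rd; everything else is
     * a straight MOVI/MVNI splat of the expanded immediate.
     */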
7857 if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
7858 /* MOVI or MVNI, with MVNI negation handled above. */
7859 tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
7860 vec_full_reg_size(s), imm);
7861 } else {
7862 /* ORR or BIC, with BIC negation to AND handled above. */
7863 if (is_neg) {
7864 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
7865 } else {
7866 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
7867 }
7868 }
7869 }
7870
7871 /* AdvSIMD scalar copy
7872 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
7873 * +-----+----+-----------------+------+---+------+---+------+------+
7874 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
7875 * +-----+----+-----------------+------+---+------+---+------+------+
7876 */
7877 static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
7878 {
7879 int rd = extract32(insn, 0, 5);
7880 int rn = extract32(insn, 5, 5);
7881 int imm4 = extract32(insn, 11, 4);
7882 int imm5 = extract32(insn, 16, 5);
7883 int op = extract32(insn, 29, 1);
7884
7885 if (op != 0 || imm4 != 0) {
7886 unallocated_encoding(s);
7887 return;
7888 }
7889
7890 /* DUP (element, scalar) */
7891 handle_simd_dupes(s, rd, rn, imm5);
7892 }
7893
7894 /* AdvSIMD scalar pairwise
7895 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
7896 * +-----+---+-----------+------+-----------+--------+-----+------+------+
7897 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
7898 * +-----+---+-----------+------+-----------+--------+-----+------+------+
7899 */
7900 static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
7901 {
7902 int u = extract32(insn, 29, 1);
7903 int size = extract32(insn, 22, 2);
7904 int opcode = extract32(insn, 12, 5);
7905 int rn = extract32(insn, 5, 5);
7906 int rd = extract32(insn, 0, 5);
7907 TCGv_ptr fpst;
7908
7909 /* For some ops (the FP ones), size[1] is part of the encoding.
7910 * For ADDP strictly it is not but size[1] is always 1 for valid
7911 * encodings.
7912 */
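/*
 * For example FMINP (scalar, double): U == 1, size == 0b11,
 * opcode == 0b01111 gives the combined opcode 0x2f with MO_64 elements.
 */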
7913 opcode |= (extract32(size, 1, 1) << 5);
7914
7915 switch (opcode) {
7916 case 0x3b: /* ADDP */
7917 if (u || size != 3) {
7918 unallocated_encoding(s);
7919 return;
7920 }
7921 if (!fp_access_check(s)) {
7922 return;
7923 }
7924
7925 fpst = NULL;
7926 break;
7927 case 0xc: /* FMAXNMP */
7928 case 0xd: /* FADDP */
7929 case 0xf: /* FMAXP */
7930 case 0x2c: /* FMINNMP */
7931 case 0x2f: /* FMINP */
7932 /* FP op, size[0] is 32 or 64 bit */
7933 if (!u) {
7934 if (!dc_isar_feature(aa64_fp16, s)) {
7935 unallocated_encoding(s);
7936 return;
7937 } else {
7938 size = MO_16;
7939 }
7940 } else {
7941 size = extract32(size, 0, 1) ? MO_64 : MO_32;
7942 }
7943
7944 if (!fp_access_check(s)) {
7945 return;
7946 }
7947
7948 fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
7949 break;
7950 default:
7951 unallocated_encoding(s);
7952 return;
7953 }
7954
7955 if (size == MO_64) {
7956 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7957 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7958 TCGv_i64 tcg_res = tcg_temp_new_i64();
7959
7960 read_vec_element(s, tcg_op1, rn, 0, MO_64);
7961 read_vec_element(s, tcg_op2, rn, 1, MO_64);
7962
7963 switch (opcode) {
7964 case 0x3b: /* ADDP */
7965 tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
7966 break;
7967 case 0xc: /* FMAXNMP */
7968 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7969 break;
7970 case 0xd: /* FADDP */
7971 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
7972 break;
7973 case 0xf: /* FMAXP */
7974 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
7975 break;
7976 case 0x2c: /* FMINNMP */
7977 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7978 break;
7979 case 0x2f: /* FMINP */
7980 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
7981 break;
7982 default:
7983 g_assert_not_reached();
7984 }
7985
7986 write_fp_dreg(s, rd, tcg_res);
7987 } else {
7988 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
7989 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
7990 TCGv_i32 tcg_res = tcg_temp_new_i32();
7991
7992 read_vec_element_i32(s, tcg_op1, rn, 0, size);
7993 read_vec_element_i32(s, tcg_op2, rn, 1, size);
7994
7995 if (size == MO_16) {
7996 switch (opcode) {
7997 case 0xc: /* FMAXNMP */
7998 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
7999 break;
8000 case 0xd: /* FADDP */
8001 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
8002 break;
8003 case 0xf: /* FMAXP */
8004 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
8005 break;
8006 case 0x2c: /* FMINNMP */
8007 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
8008 break;
8009 case 0x2f: /* FMINP */
8010 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
8011 break;
8012 default:
8013 g_assert_not_reached();
8014 }
8015 } else {
8016 switch (opcode) {
8017 case 0xc: /* FMAXNMP */
8018 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
8019 break;
8020 case 0xd: /* FADDP */
8021 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
8022 break;
8023 case 0xf: /* FMAXP */
8024 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
8025 break;
8026 case 0x2c: /* FMINNMP */
8027 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
8028 break;
8029 case 0x2f: /* FMINP */
8030 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
8031 break;
8032 default:
8033 g_assert_not_reached();
8034 }
8035 }
8036
8037 write_fp_sreg(s, rd, tcg_res);
8038 }
8039 }
8040
8041 /*
8042 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
8043 *
8044 * This function handles the common shift logic and is used by both
8045 * the vector and scalar code.
8046 */
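/*
 * Sketch of the rounding step: for e.g. URSHR #n the callers pass
 * tcg_rnd == 1 << (n - 1), which is added before shifting.  When size == 3
 * that addition can carry out of 64 bits, so the extended_result path keeps
 * the carry in tcg_src_hi and folds it back in after the shift.
 */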
8047 static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
8048 TCGv_i64 tcg_rnd, bool accumulate,
8049 bool is_u, int size, int shift)
8050 {
8051 bool extended_result = false;
8052 bool round = tcg_rnd != NULL;
8053 int ext_lshift = 0;
8054 TCGv_i64 tcg_src_hi;
8055
8056 if (round && size == 3) {
8057 extended_result = true;
8058 ext_lshift = 64 - shift;
8059 tcg_src_hi = tcg_temp_new_i64();
8060 } else if (shift == 64) {
8061 if (!accumulate && is_u) {
8062 /* result is zero */
8063 tcg_gen_movi_i64(tcg_res, 0);
8064 return;
8065 }
8066 }
8067
8068 /* Deal with the rounding step */
8069 if (round) {
8070 if (extended_result) {
8071 TCGv_i64 tcg_zero = tcg_constant_i64(0);
8072 if (!is_u) {
8073 /* take care of sign extending tcg_res */
8074 tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
8075 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
8076 tcg_src, tcg_src_hi,
8077 tcg_rnd, tcg_zero);
8078 } else {
8079 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
8080 tcg_src, tcg_zero,
8081 tcg_rnd, tcg_zero);
8082 }
8083 } else {
8084 tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
8085 }
8086 }
8087
8088 /* Now do the shift right */
8089 if (round && extended_result) {
8090 /* extended case, >64 bit precision required */
8091 if (ext_lshift == 0) {
8092 /* special case, only high bits matter */
8093 tcg_gen_mov_i64(tcg_src, tcg_src_hi);
8094 } else {
8095 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
8096 tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
8097 tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
8098 }
8099 } else {
8100 if (is_u) {
8101 if (shift == 64) {
8102 /* essentially shifting in 64 zeros */
8103 tcg_gen_movi_i64(tcg_src, 0);
8104 } else {
8105 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
8106 }
8107 } else {
8108 if (shift == 64) {
8109 /* effectively extending the sign-bit */
8110 tcg_gen_sari_i64(tcg_src, tcg_src, 63);
8111 } else {
8112 tcg_gen_sari_i64(tcg_src, tcg_src, shift);
8113 }
8114 }
8115 }
8116
8117 if (accumulate) {
8118 tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
8119 } else {
8120 tcg_gen_mov_i64(tcg_res, tcg_src);
8121 }
8122 }
8123
8124 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
8125 static void handle_scalar_simd_shri(DisasContext *s,
8126 bool is_u, int immh, int immb,
8127 int opcode, int rn, int rd)
8128 {
8129 const int size = 3;
8130 int immhb = immh << 3 | immb;
8131 int shift = 2 * (8 << size) - immhb;
8132 bool accumulate = false;
8133 bool round = false;
8134 bool insert = false;
8135 TCGv_i64 tcg_rn;
8136 TCGv_i64 tcg_rd;
8137 TCGv_i64 tcg_round;
8138
8139 if (!extract32(immh, 3, 1)) {
8140 unallocated_encoding(s);
8141 return;
8142 }
8143
8144 if (!fp_access_check(s)) {
8145 return;
8146 }
8147
8148 switch (opcode) {
8149 case 0x02: /* SSRA / USRA (accumulate) */
8150 accumulate = true;
8151 break;
8152 case 0x04: /* SRSHR / URSHR (rounding) */
8153 round = true;
8154 break;
8155 case 0x06: /* SRSRA / URSRA (accum + rounding) */
8156 accumulate = round = true;
8157 break;
8158 case 0x08: /* SRI */
8159 insert = true;
8160 break;
8161 }
8162
8163 if (round) {
8164 tcg_round = tcg_constant_i64(1ULL << (shift - 1));
8165 } else {
8166 tcg_round = NULL;
8167 }
8168
8169 tcg_rn = read_fp_dreg(s, rn);
8170 tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8171
8172 if (insert) {
8173 /* shift count same as element size is valid but does nothing;
8174 * special case to avoid potential shift by 64.
8175 */
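/* e.g. SRI Dd, Dn, #8 keeps the top 8 bits of Dd and deposits
 * bits [63:8] of Dn into bits [55:0].
 */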
8176 int esize = 8 << size;
8177 if (shift != esize) {
8178 tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
8179 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
8180 }
8181 } else {
8182 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8183 accumulate, is_u, size, shift);
8184 }
8185
8186 write_fp_dreg(s, rd, tcg_rd);
8187 }
8188
8189 /* SHL/SLI - Scalar shift left */
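/*
 * Worked encoding example: SHL D0, D1, #3 has immh:immb == 0b1000011,
 * so size == 3 (immh[3] set) and shift == 67 - 64 == 3.
 */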
8190 static void handle_scalar_simd_shli(DisasContext *s, bool insert,
8191 int immh, int immb, int opcode,
8192 int rn, int rd)
8193 {
8194 int size = 32 - clz32(immh) - 1;
8195 int immhb = immh << 3 | immb;
8196 int shift = immhb - (8 << size);
8197 TCGv_i64 tcg_rn;
8198 TCGv_i64 tcg_rd;
8199
8200 if (!extract32(immh, 3, 1)) {
8201 unallocated_encoding(s);
8202 return;
8203 }
8204
8205 if (!fp_access_check(s)) {
8206 return;
8207 }
8208
8209 tcg_rn = read_fp_dreg(s, rn);
8210 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8211
8212 if (insert) {
8213 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
8214 } else {
8215 tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
8216 }
8217
8218 write_fp_dreg(s, rd, tcg_rd);
8219 }
8220
8221 /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
8222 * (signed/unsigned) narrowing */
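/*
 * Worked encoding example: SQSHRN Vd.8B, Vn.8H, #3 has
 * immh:immb == 0b0001101, so size == 0, esize == 8 (the narrowed element
 * width) and shift == 16 - 13 == 3.
 */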
8223 static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
8224 bool is_u_shift, bool is_u_narrow,
8225 int immh, int immb, int opcode,
8226 int rn, int rd)
8227 {
8228 int immhb = immh << 3 | immb;
8229 int size = 32 - clz32(immh) - 1;
8230 int esize = 8 << size;
8231 int shift = (2 * esize) - immhb;
8232 int elements = is_scalar ? 1 : (64 / esize);
8233 bool round = extract32(opcode, 0, 1);
8234 MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
8235 TCGv_i64 tcg_rn, tcg_rd, tcg_round;
8236 TCGv_i32 tcg_rd_narrowed;
8237 TCGv_i64 tcg_final;
8238
8239 static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
8240 { gen_helper_neon_narrow_sat_s8,
8241 gen_helper_neon_unarrow_sat8 },
8242 { gen_helper_neon_narrow_sat_s16,
8243 gen_helper_neon_unarrow_sat16 },
8244 { gen_helper_neon_narrow_sat_s32,
8245 gen_helper_neon_unarrow_sat32 },
8246 { NULL, NULL },
8247 };
8248 static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
8249 gen_helper_neon_narrow_sat_u8,
8250 gen_helper_neon_narrow_sat_u16,
8251 gen_helper_neon_narrow_sat_u32,
8252 NULL
8253 };
8254 NeonGenNarrowEnvFn *narrowfn;
8255
8256 int i;
8257
8258 assert(size < 4);
8259
8260 if (extract32(immh, 3, 1)) {
8261 unallocated_encoding(s);
8262 return;
8263 }
8264
8265 if (!fp_access_check(s)) {
8266 return;
8267 }
8268
8269 if (is_u_shift) {
8270 narrowfn = unsigned_narrow_fns[size];
8271 } else {
8272 narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
8273 }
8274
8275 tcg_rn = tcg_temp_new_i64();
8276 tcg_rd = tcg_temp_new_i64();
8277 tcg_rd_narrowed = tcg_temp_new_i32();
8278 tcg_final = tcg_temp_new_i64();
8279
8280 if (round) {
8281 tcg_round = tcg_constant_i64(1ULL << (shift - 1));
8282 } else {
8283 tcg_round = NULL;
8284 }
8285
8286 for (i = 0; i < elements; i++) {
8287 read_vec_element(s, tcg_rn, rn, i, ldop);
8288 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8289 false, is_u_shift, size+1, shift);
8290 narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
8291 tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
8292 if (i == 0) {
8293 tcg_gen_mov_i64(tcg_final, tcg_rd);
8294 } else {
8295 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
8296 }
8297 }
8298
8299 if (!is_q) {
8300 write_vec_element(s, tcg_final, rd, 0, MO_64);
8301 } else {
8302 write_vec_element(s, tcg_final, rd, 1, MO_64);
8303 }
8304 clear_vec_high(s, is_q, rd);
8305 }
8306
8307 /* SQSHLU, UQSHL, SQSHL: saturating left shifts */
8308 static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
8309 bool src_unsigned, bool dst_unsigned,
8310 int immh, int immb, int rn, int rd)
8311 {
8312 int immhb = immh << 3 | immb;
8313 int size = 32 - clz32(immh) - 1;
8314 int shift = immhb - (8 << size);
8315 int pass;
8316
8317 assert(immh != 0);
8318 assert(!(scalar && is_q));
8319
8320 if (!scalar) {
8321 if (!is_q && extract32(immh, 3, 1)) {
8322 unallocated_encoding(s);
8323 return;
8324 }
8325
8326 /* Since we use the variable-shift helpers we must
8327 * replicate the shift count into each element of
8328 * the tcg_shift value.
8329 */
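/* For example size == 0 (bytes) with shift == 3 becomes 0x03030303,
 * so each byte lane of the 32-bit helper value sees the same count.
 */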
8330 switch (size) {
8331 case 0:
8332 shift |= shift << 8;
8333 /* fall through */
8334 case 1:
8335 shift |= shift << 16;
8336 break;
8337 case 2:
8338 case 3:
8339 break;
8340 default:
8341 g_assert_not_reached();
8342 }
8343 }
8344
8345 if (!fp_access_check(s)) {
8346 return;
8347 }
8348
8349 if (size == 3) {
8350 TCGv_i64 tcg_shift = tcg_constant_i64(shift);
8351 static NeonGenTwo64OpEnvFn * const fns[2][2] = {
8352 { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
8353 { NULL, gen_helper_neon_qshl_u64 },
8354 };
8355 NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
8356 int maxpass = is_q ? 2 : 1;
8357
8358 for (pass = 0; pass < maxpass; pass++) {
8359 TCGv_i64 tcg_op = tcg_temp_new_i64();
8360
8361 read_vec_element(s, tcg_op, rn, pass, MO_64);
8362 genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
8363 write_vec_element(s, tcg_op, rd, pass, MO_64);
8364 }
8365 clear_vec_high(s, is_q, rd);
8366 } else {
8367 TCGv_i32 tcg_shift = tcg_constant_i32(shift);
8368 static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
8369 {
8370 { gen_helper_neon_qshl_s8,
8371 gen_helper_neon_qshl_s16,
8372 gen_helper_neon_qshl_s32 },
8373 { gen_helper_neon_qshlu_s8,
8374 gen_helper_neon_qshlu_s16,
8375 gen_helper_neon_qshlu_s32 }
8376 }, {
8377 { NULL, NULL, NULL },
8378 { gen_helper_neon_qshl_u8,
8379 gen_helper_neon_qshl_u16,
8380 gen_helper_neon_qshl_u32 }
8381 }
8382 };
8383 NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
8384 MemOp memop = scalar ? size : MO_32;
8385 int maxpass = scalar ? 1 : is_q ? 4 : 2;
8386
8387 for (pass = 0; pass < maxpass; pass++) {
8388 TCGv_i32 tcg_op = tcg_temp_new_i32();
8389
8390 read_vec_element_i32(s, tcg_op, rn, pass, memop);
8391 genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
8392 if (scalar) {
8393 switch (size) {
8394 case 0:
8395 tcg_gen_ext8u_i32(tcg_op, tcg_op);
8396 break;
8397 case 1:
8398 tcg_gen_ext16u_i32(tcg_op, tcg_op);
8399 break;
8400 case 2:
8401 break;
8402 default:
8403 g_assert_not_reached();
8404 }
8405 write_fp_sreg(s, rd, tcg_op);
8406 } else {
8407 write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
8408 }
8409 }
8410
8411 if (!scalar) {
8412 clear_vec_high(s, is_q, rd);
8413 }
8414 }
8415 }
8416
8417 /* Common vector code for handling integer to FP conversion */
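/*
 * fracbits != 0 selects the fixed-point forms of SCVTF/UCVTF: the integer
 * source is treated as having that many fractional bits, i.e. the result
 * is value / 2^fracbits.
 */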
8418 static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
8419 int elements, int is_signed,
8420 int fracbits, int size)
8421 {
8422 TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
8423 TCGv_i32 tcg_shift = NULL;
8424
8425 MemOp mop = size | (is_signed ? MO_SIGN : 0);
8426 int pass;
8427
8428 if (fracbits || size == MO_64) {
8429 tcg_shift = tcg_constant_i32(fracbits);
8430 }
8431
8432 if (size == MO_64) {
8433 TCGv_i64 tcg_int64 = tcg_temp_new_i64();
8434 TCGv_i64 tcg_double = tcg_temp_new_i64();
8435
8436 for (pass = 0; pass < elements; pass++) {
8437 read_vec_element(s, tcg_int64, rn, pass, mop);
8438
8439 if (is_signed) {
8440 gen_helper_vfp_sqtod(tcg_double, tcg_int64,
8441 tcg_shift, tcg_fpst);
8442 } else {
8443 gen_helper_vfp_uqtod(tcg_double, tcg_int64,
8444 tcg_shift, tcg_fpst);
8445 }
8446 if (elements == 1) {
8447 write_fp_dreg(s, rd, tcg_double);
8448 } else {
8449 write_vec_element(s, tcg_double, rd, pass, MO_64);
8450 }
8451 }
8452 } else {
8453 TCGv_i32 tcg_int32 = tcg_temp_new_i32();
8454 TCGv_i32 tcg_float = tcg_temp_new_i32();
8455
8456 for (pass = 0; pass < elements; pass++) {
8457 read_vec_element_i32(s, tcg_int32, rn, pass, mop);
8458
8459 switch (size) {
8460 case MO_32:
8461 if (fracbits) {
8462 if (is_signed) {
8463 gen_helper_vfp_sltos(tcg_float, tcg_int32,
8464 tcg_shift, tcg_fpst);
8465 } else {
8466 gen_helper_vfp_ultos(tcg_float, tcg_int32,
8467 tcg_shift, tcg_fpst);
8468 }
8469 } else {
8470 if (is_signed) {
8471 gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
8472 } else {
8473 gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
8474 }
8475 }
8476 break;
8477 case MO_16:
8478 if (fracbits) {
8479 if (is_signed) {
8480 gen_helper_vfp_sltoh(tcg_float, tcg_int32,
8481 tcg_shift, tcg_fpst);
8482 } else {
8483 gen_helper_vfp_ultoh(tcg_float, tcg_int32,
8484 tcg_shift, tcg_fpst);
8485 }
8486 } else {
8487 if (is_signed) {
8488 gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
8489 } else {
8490 gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
8491 }
8492 }
8493 break;
8494 default:
8495 g_assert_not_reached();
8496 }
8497
8498 if (elements == 1) {
8499 write_fp_sreg(s, rd, tcg_float);
8500 } else {
8501 write_vec_element_i32(s, tcg_float, rd, pass, size);
8502 }
8503 }
8504 }
8505
8506 clear_vec_high(s, elements << size == 16, rd);
8507 }
8508
8509 /* UCVTF/SCVTF - Integer to FP conversion */
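/*
 * Worked encoding example (fixed-point form): SCVTF Sd, Sn, #16 has
 * immh:immb == 0b0110000, so size == MO_32 and fracbits == 64 - 48 == 16.
 */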
8510 static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
8511 bool is_q, bool is_u,
8512 int immh, int immb, int opcode,
8513 int rn, int rd)
8514 {
8515 int size, elements, fracbits;
8516 int immhb = immh << 3 | immb;
8517
8518 if (immh & 8) {
8519 size = MO_64;
8520 if (!is_scalar && !is_q) {
8521 unallocated_encoding(s);
8522 return;
8523 }
8524 } else if (immh & 4) {
8525 size = MO_32;
8526 } else if (immh & 2) {
8527 size = MO_16;
8528 if (!dc_isar_feature(aa64_fp16, s)) {
8529 unallocated_encoding(s);
8530 return;
8531 }
8532 } else {
8533 /* immh == 0 would be a failure of the decode logic */
8534 g_assert(immh == 1);
8535 unallocated_encoding(s);
8536 return;
8537 }
8538
8539 if (is_scalar) {
8540 elements = 1;
8541 } else {
8542 elements = (8 << is_q) >> size;
8543 }
8544 fracbits = (16 << size) - immhb;
8545
8546 if (!fp_access_check(s)) {
8547 return;
8548 }
8549
8550 handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
8551 }
8552
8553 /* FCVTZS, FCVTZU - FP to fixed-point conversion */
8554 static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
8555 bool is_q, bool is_u,
8556 int immh, int immb, int rn, int rd)
8557 {
8558 int immhb = immh << 3 | immb;
8559 int pass, size, fracbits;
8560 TCGv_ptr tcg_fpstatus;
8561 TCGv_i32 tcg_rmode, tcg_shift;
8562
8563 if (immh & 0x8) {
8564 size = MO_64;
8565 if (!is_scalar && !is_q) {
8566 unallocated_encoding(s);
8567 return;
8568 }
8569 } else if (immh & 0x4) {
8570 size = MO_32;
8571 } else if (immh & 0x2) {
8572 size = MO_16;
8573 if (!dc_isar_feature(aa64_fp16, s)) {
8574 unallocated_encoding(s);
8575 return;
8576 }
8577 } else {
8578 /* Should have split out AdvSIMD modified immediate earlier. */
8579 assert(immh == 1);
8580 unallocated_encoding(s);
8581 return;
8582 }
8583
8584 if (!fp_access_check(s)) {
8585 return;
8586 }
8587
8588 assert(!(is_scalar && is_q));
8589
8590 tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
8591 tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus);
8592 fracbits = (16 << size) - immhb;
8593 tcg_shift = tcg_constant_i32(fracbits);
8594
8595 if (size == MO_64) {
8596 int maxpass = is_scalar ? 1 : 2;
8597
8598 for (pass = 0; pass < maxpass; pass++) {
8599 TCGv_i64 tcg_op = tcg_temp_new_i64();
8600
8601 read_vec_element(s, tcg_op, rn, pass, MO_64);
8602 if (is_u) {
8603 gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8604 } else {
8605 gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8606 }
8607 write_vec_element(s, tcg_op, rd, pass, MO_64);
8608 }
8609 clear_vec_high(s, is_q, rd);
8610 } else {
8611 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
8612 int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
8613
8614 switch (size) {
8615 case MO_16:
8616 if (is_u) {
8617 fn = gen_helper_vfp_touhh;
8618 } else {
8619 fn = gen_helper_vfp_toshh;
8620 }
8621 break;
8622 case MO_32:
8623 if (is_u) {
8624 fn = gen_helper_vfp_touls;
8625 } else {
8626 fn = gen_helper_vfp_tosls;
8627 }
8628 break;
8629 default:
8630 g_assert_not_reached();
8631 }
8632
8633 for (pass = 0; pass < maxpass; pass++) {
8634 TCGv_i32 tcg_op = tcg_temp_new_i32();
8635
8636 read_vec_element_i32(s, tcg_op, rn, pass, size);
8637 fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8638 if (is_scalar) {
8639 write_fp_sreg(s, rd, tcg_op);
8640 } else {
8641 write_vec_element_i32(s, tcg_op, rd, pass, size);
8642 }
8643 }
8644 if (!is_scalar) {
8645 clear_vec_high(s, is_q, rd);
8646 }
8647 }
8648
8649 gen_restore_rmode(tcg_rmode, tcg_fpstatus);
8650 }
8651
8652 /* AdvSIMD scalar shift by immediate
8653 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
8654 * +-----+---+-------------+------+------+--------+---+------+------+
8655 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
8656 * +-----+---+-------------+------+------+--------+---+------+------+
8657 *
8658 * This is the scalar version so it works on fixed size registers
8659 */
8660 static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
8661 {
8662 int rd = extract32(insn, 0, 5);
8663 int rn = extract32(insn, 5, 5);
8664 int opcode = extract32(insn, 11, 5);
8665 int immb = extract32(insn, 16, 3);
8666 int immh = extract32(insn, 19, 4);
8667 bool is_u = extract32(insn, 29, 1);
8668
8669 if (immh == 0) {
8670 unallocated_encoding(s);
8671 return;
8672 }
8673
8674 switch (opcode) {
8675 case 0x08: /* SRI */
8676 if (!is_u) {
8677 unallocated_encoding(s);
8678 return;
8679 }
8680 /* fall through */
8681 case 0x00: /* SSHR / USHR */
8682 case 0x02: /* SSRA / USRA */
8683 case 0x04: /* SRSHR / URSHR */
8684 case 0x06: /* SRSRA / URSRA */
8685 handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
8686 break;
8687 case 0x0a: /* SHL / SLI */
8688 handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
8689 break;
8690 case 0x1c: /* SCVTF, UCVTF */
8691 handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
8692 opcode, rn, rd);
8693 break;
8694 case 0x10: /* SQSHRUN, SQSHRUN2 */
8695 case 0x11: /* SQRSHRUN, SQRSHRUN2 */
8696 if (!is_u) {
8697 unallocated_encoding(s);
8698 return;
8699 }
8700 handle_vec_simd_sqshrn(s, true, false, false, true,
8701 immh, immb, opcode, rn, rd);
8702 break;
8703 case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
8704 case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
8705 handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
8706 immh, immb, opcode, rn, rd);
8707 break;
8708 case 0xc: /* SQSHLU */
8709 if (!is_u) {
8710 unallocated_encoding(s);
8711 return;
8712 }
8713 handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
8714 break;
8715 case 0xe: /* SQSHL, UQSHL */
8716 handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
8717 break;
8718 case 0x1f: /* FCVTZS, FCVTZU */
8719 handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
8720 break;
8721 default:
8722 unallocated_encoding(s);
8723 break;
8724 }
8725 }
8726
8727 /* AdvSIMD scalar three different
8728 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
8729 * +-----+---+-----------+------+---+------+--------+-----+------+------+
8730 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
8731 * +-----+---+-----------+------+---+------+--------+-----+------+------+
8732 */
8733 static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
8734 {
8735 bool is_u = extract32(insn, 29, 1);
8736 int size = extract32(insn, 22, 2);
8737 int opcode = extract32(insn, 12, 4);
8738 int rm = extract32(insn, 16, 5);
8739 int rn = extract32(insn, 5, 5);
8740 int rd = extract32(insn, 0, 5);
8741
8742 if (is_u) {
8743 unallocated_encoding(s);
8744 return;
8745 }
8746
8747 switch (opcode) {
8748 case 0x9: /* SQDMLAL, SQDMLAL2 */
8749 case 0xb: /* SQDMLSL, SQDMLSL2 */
8750 case 0xd: /* SQDMULL, SQDMULL2 */
8751 if (size == 0 || size == 3) {
8752 unallocated_encoding(s);
8753 return;
8754 }
8755 break;
8756 default:
8757 unallocated_encoding(s);
8758 return;
8759 }
8760
8761 if (!fp_access_check(s)) {
8762 return;
8763 }
8764
8765 if (size == 2) {
8766 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8767 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8768 TCGv_i64 tcg_res = tcg_temp_new_i64();
8769
8770 read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
8771 read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
8772
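/* The doubling in SQDMULL is implemented as a saturating add of the
 * product to itself, which also sets QC for the one overflowing case
 * (both inputs INT32_MIN).
 */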
8773 tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
8774 gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
8775
8776 switch (opcode) {
8777 case 0xd: /* SQDMULL, SQDMULL2 */
8778 break;
8779 case 0xb: /* SQDMLSL, SQDMLSL2 */
8780 tcg_gen_neg_i64(tcg_res, tcg_res);
8781 /* fall through */
8782 case 0x9: /* SQDMLAL, SQDMLAL2 */
8783 read_vec_element(s, tcg_op1, rd, 0, MO_64);
8784 gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
8785 tcg_res, tcg_op1);
8786 break;
8787 default:
8788 g_assert_not_reached();
8789 }
8790
8791 write_fp_dreg(s, rd, tcg_res);
8792 } else {
8793 TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
8794 TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
8795 TCGv_i64 tcg_res = tcg_temp_new_i64();
8796
8797 gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
8798 gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
8799
8800 switch (opcode) {
8801 case 0xd: /* SQDMULL, SQDMULL2 */
8802 break;
8803 case 0xb: /* SQDMLSL, SQDMLSL2 */
8804 gen_helper_neon_negl_u32(tcg_res, tcg_res);
8805 /* fall through */
8806 case 0x9: /* SQDMLAL, SQDMLAL2 */
8807 {
8808 TCGv_i64 tcg_op3 = tcg_temp_new_i64();
8809 read_vec_element(s, tcg_op3, rd, 0, MO_32);
8810 gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
8811 tcg_res, tcg_op3);
8812 break;
8813 }
8814 default:
8815 g_assert_not_reached();
8816 }
8817
8818 tcg_gen_ext32u_i64(tcg_res, tcg_res);
8819 write_fp_dreg(s, rd, tcg_res);
8820 }
8821 }
8822
8823 static void handle_3same_64(DisasContext *s, int opcode, bool u,
8824 TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
8825 {
8826 /* Handle 64x64->64 opcodes which are shared between the scalar
8827 * and vector 3-same groups. We cover every opcode where size == 3
8828 * is valid in either the three-reg-same (integer, not pairwise)
8829 * or scalar-three-reg-same groups.
8830 */
8831 TCGCond cond;
8832
8833 switch (opcode) {
8834 case 0x1: /* SQADD */
8835 if (u) {
8836 gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8837 } else {
8838 gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8839 }
8840 break;
8841 case 0x5: /* SQSUB */
8842 if (u) {
8843 gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8844 } else {
8845 gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8846 }
8847 break;
8848 case 0x6: /* CMGT, CMHI */
8849 /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
8850 * We implement this using setcond (test) and then negating.
8851 */
8852 cond = u ? TCG_COND_GTU : TCG_COND_GT;
8853 do_cmop:
8854 tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
8855 tcg_gen_neg_i64(tcg_rd, tcg_rd);
8856 break;
8857 case 0x7: /* CMGE, CMHS */
8858 cond = u ? TCG_COND_GEU : TCG_COND_GE;
8859 goto do_cmop;
8860 case 0x11: /* CMTST, CMEQ */
8861 if (u) {
8862 cond = TCG_COND_EQ;
8863 goto do_cmop;
8864 }
8865 gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
8866 break;
8867 case 0x8: /* SSHL, USHL */
8868 if (u) {
8869 gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
8870 } else {
8871 gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
8872 }
8873 break;
8874 case 0x9: /* SQSHL, UQSHL */
8875 if (u) {
8876 gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8877 } else {
8878 gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8879 }
8880 break;
8881 case 0xa: /* SRSHL, URSHL */
8882 if (u) {
8883 gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
8884 } else {
8885 gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
8886 }
8887 break;
8888 case 0xb: /* SQRSHL, UQRSHL */
8889 if (u) {
8890 gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8891 } else {
8892 gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8893 }
8894 break;
8895 case 0x10: /* ADD, SUB */
8896 if (u) {
8897 tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
8898 } else {
8899 tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
8900 }
8901 break;
8902 default:
8903 g_assert_not_reached();
8904 }
8905 }
8906
8907 /* Handle the 3-same-operands float operations; shared by the scalar
8908 * and vector encodings. The caller must filter out any encodings
8909 * not allocated for the encoding it is dealing with.
8910 */
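/*
 * Note on FMLS: the negation is applied to an input operand rather than
 * to the product, because the multiply-add is fused (single rounding) and
 * there is no separately rounded product available to negate.
 */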
8911 static void handle_3same_float(DisasContext *s, int size, int elements,
8912 int fpopcode, int rd, int rn, int rm)
8913 {
8914 int pass;
8915 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
8916
8917 for (pass = 0; pass < elements; pass++) {
8918 if (size) {
8919 /* Double */
8920 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8921 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8922 TCGv_i64 tcg_res = tcg_temp_new_i64();
8923
8924 read_vec_element(s, tcg_op1, rn, pass, MO_64);
8925 read_vec_element(s, tcg_op2, rm, pass, MO_64);
8926
8927 switch (fpopcode) {
8928 case 0x39: /* FMLS */
8929 /* As usual for ARM, separate negation for fused multiply-add */
8930 gen_helper_vfp_negd(tcg_op1, tcg_op1);
8931 /* fall through */
8932 case 0x19: /* FMLA */
8933 read_vec_element(s, tcg_res, rd, pass, MO_64);
8934 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
8935 tcg_res, fpst);
8936 break;
8937 case 0x18: /* FMAXNM */
8938 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8939 break;
8940 case 0x1a: /* FADD */
8941 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
8942 break;
8943 case 0x1b: /* FMULX */
8944 gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
8945 break;
8946 case 0x1c: /* FCMEQ */
8947 gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8948 break;
8949 case 0x1e: /* FMAX */
8950 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
8951 break;
8952 case 0x1f: /* FRECPS */
8953 gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8954 break;
8955 case 0x38: /* FMINNM */
8956 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8957 break;
8958 case 0x3a: /* FSUB */
8959 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
8960 break;
8961 case 0x3e: /* FMIN */
8962 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
8963 break;
8964 case 0x3f: /* FRSQRTS */
8965 gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8966 break;
8967 case 0x5b: /* FMUL */
8968 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
8969 break;
8970 case 0x5c: /* FCMGE */
8971 gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8972 break;
8973 case 0x5d: /* FACGE */
8974 gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8975 break;
8976 case 0x5f: /* FDIV */
8977 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
8978 break;
8979 case 0x7a: /* FABD */
8980 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
8981 gen_helper_vfp_absd(tcg_res, tcg_res);
8982 break;
8983 case 0x7c: /* FCMGT */
8984 gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8985 break;
8986 case 0x7d: /* FACGT */
8987 gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8988 break;
8989 default:
8990 g_assert_not_reached();
8991 }
8992
8993 write_vec_element(s, tcg_res, rd, pass, MO_64);
8994 } else {
8995 /* Single */
8996 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
8997 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
8998 TCGv_i32 tcg_res = tcg_temp_new_i32();
8999
9000 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
9001 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
9002
9003 switch (fpopcode) {
9004 case 0x39: /* FMLS */
9005 /* As usual for ARM, separate negation for fused multiply-add */
9006 gen_helper_vfp_negs(tcg_op1, tcg_op1);
9007 /* fall through */
9008 case 0x19: /* FMLA */
9009 read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9010 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
9011 tcg_res, fpst);
9012 break;
9013 case 0x1a: /* FADD */
9014 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
9015 break;
9016 case 0x1b: /* FMULX */
9017 gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
9018 break;
9019 case 0x1c: /* FCMEQ */
9020 gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9021 break;
9022 case 0x1e: /* FMAX */
9023 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
9024 break;
9025 case 0x1f: /* FRECPS */
9026 gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9027 break;
9028 case 0x18: /* FMAXNM */
9029 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
9030 break;
9031 case 0x38: /* FMINNM */
9032 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
9033 break;
9034 case 0x3a: /* FSUB */
9035 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
9036 break;
9037 case 0x3e: /* FMIN */
9038 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
9039 break;
9040 case 0x3f: /* FRSQRTS */
9041 gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9042 break;
9043 case 0x5b: /* FMUL */
9044 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
9045 break;
9046 case 0x5c: /* FCMGE */
9047 gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9048 break;
9049 case 0x5d: /* FACGE */
9050 gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9051 break;
9052 case 0x5f: /* FDIV */
9053 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
9054 break;
9055 case 0x7a: /* FABD */
9056 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
9057 gen_helper_vfp_abss(tcg_res, tcg_res);
9058 break;
9059 case 0x7c: /* FCMGT */
9060 gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9061 break;
9062 case 0x7d: /* FACGT */
9063 gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9064 break;
9065 default:
9066 g_assert_not_reached();
9067 }
9068
9069 if (elements == 1) {
9070 /* scalar single so clear high part */
9071 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
9072
9073 tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
9074 write_vec_element(s, tcg_tmp, rd, pass, MO_64);
9075 } else {
9076 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9077 }
9078 }
9079 }
9080
9081 clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
9082 }
9083
9084 /* AdvSIMD scalar three same
9085 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
9086 * +-----+---+-----------+------+---+------+--------+---+------+------+
9087 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
9088 * +-----+---+-----------+------+---+------+--------+---+------+------+
9089 */
9090 static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
9091 {
9092 int rd = extract32(insn, 0, 5);
9093 int rn = extract32(insn, 5, 5);
9094 int opcode = extract32(insn, 11, 5);
9095 int rm = extract32(insn, 16, 5);
9096 int size = extract32(insn, 22, 2);
9097 bool u = extract32(insn, 29, 1);
9098 TCGv_i64 tcg_rd;
9099
9100 if (opcode >= 0x18) {
9101 /* Floating point: U, size[1] and opcode indicate operation */
9102 int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
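/*
 * e.g. FABD (scalar, double): U == 1, size == 0b11, opcode == 0b11010
 * gives fpopcode == 0x7a below.
 */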
9103 switch (fpopcode) {
9104 case 0x1b: /* FMULX */
9105 case 0x1f: /* FRECPS */
9106 case 0x3f: /* FRSQRTS */
9107 case 0x5d: /* FACGE */
9108 case 0x7d: /* FACGT */
9109 case 0x1c: /* FCMEQ */
9110 case 0x5c: /* FCMGE */
9111 case 0x7c: /* FCMGT */
9112 case 0x7a: /* FABD */
9113 break;
9114 default:
9115 unallocated_encoding(s);
9116 return;
9117 }
9118
9119 if (!fp_access_check(s)) {
9120 return;
9121 }
9122
9123 handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
9124 return;
9125 }
9126
9127 switch (opcode) {
9128 case 0x1: /* SQADD, UQADD */
9129 case 0x5: /* SQSUB, UQSUB */
9130 case 0x9: /* SQSHL, UQSHL */
9131 case 0xb: /* SQRSHL, UQRSHL */
9132 break;
9133 case 0x8: /* SSHL, USHL */
9134 case 0xa: /* SRSHL, URSHL */
9135 case 0x6: /* CMGT, CMHI */
9136 case 0x7: /* CMGE, CMHS */
9137 case 0x11: /* CMTST, CMEQ */
9138 case 0x10: /* ADD, SUB (vector) */
9139 if (size != 3) {
9140 unallocated_encoding(s);
9141 return;
9142 }
9143 break;
9144 case 0x16: /* SQDMULH, SQRDMULH (vector) */
9145 if (size != 1 && size != 2) {
9146 unallocated_encoding(s);
9147 return;
9148 }
9149 break;
9150 default:
9151 unallocated_encoding(s);
9152 return;
9153 }
9154
9155 if (!fp_access_check(s)) {
9156 return;
9157 }
9158
9159 tcg_rd = tcg_temp_new_i64();
9160
9161 if (size == 3) {
9162 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
9163 TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
9164
9165 handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
9166 } else {
9167 /* Do a single operation on the lowest element in the vector.
9168 * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
9169 * no side effects for all these operations.
9170 * OPTME: special-purpose helpers would avoid doing some
9171 * unnecessary work in the helper for the 8 and 16 bit cases.
9172 */
9173 NeonGenTwoOpEnvFn *genenvfn;
9174 TCGv_i32 tcg_rn = tcg_temp_new_i32();
9175 TCGv_i32 tcg_rm = tcg_temp_new_i32();
9176 TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
9177
9178 read_vec_element_i32(s, tcg_rn, rn, 0, size);
9179 read_vec_element_i32(s, tcg_rm, rm, 0, size);
9180
9181 switch (opcode) {
9182 case 0x1: /* SQADD, UQADD */
9183 {
9184 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9185 { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
9186 { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
9187 { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
9188 };
9189 genenvfn = fns[size][u];
9190 break;
9191 }
9192 case 0x5: /* SQSUB, UQSUB */
9193 {
9194 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9195 { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
9196 { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
9197 { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
9198 };
9199 genenvfn = fns[size][u];
9200 break;
9201 }
9202 case 0x9: /* SQSHL, UQSHL */
9203 {
9204 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9205 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
9206 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
9207 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
9208 };
9209 genenvfn = fns[size][u];
9210 break;
9211 }
9212 case 0xb: /* SQRSHL, UQRSHL */
9213 {
9214 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9215 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
9216 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
9217 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
9218 };
9219 genenvfn = fns[size][u];
9220 break;
9221 }
9222 case 0x16: /* SQDMULH, SQRDMULH */
9223 {
9224 static NeonGenTwoOpEnvFn * const fns[2][2] = {
9225 { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
9226 { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
9227 };
9228 assert(size == 1 || size == 2);
9229 genenvfn = fns[size - 1][u];
9230 break;
9231 }
9232 default:
9233 g_assert_not_reached();
9234 }
9235
9236 genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
9237 tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
9238 }
9239
9240 write_fp_dreg(s, rd, tcg_rd);
9241 }
9242
9243 /* AdvSIMD scalar three same FP16
9244 * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0
9245 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
9246 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd |
9247 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
9248 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
9249 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
9250 */
9251 static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
9252 uint32_t insn)
9253 {
9254 int rd = extract32(insn, 0, 5);
9255 int rn = extract32(insn, 5, 5);
9256 int opcode = extract32(insn, 11, 3);
9257 int rm = extract32(insn, 16, 5);
9258 bool u = extract32(insn, 29, 1);
9259 bool a = extract32(insn, 23, 1);
9260 int fpopcode = opcode | (a << 3) | (u << 4);
9261 TCGv_ptr fpst;
9262 TCGv_i32 tcg_op1;
9263 TCGv_i32 tcg_op2;
9264 TCGv_i32 tcg_res;
9265
9266 switch (fpopcode) {
9267 case 0x03: /* FMULX */
9268 case 0x04: /* FCMEQ (reg) */
9269 case 0x07: /* FRECPS */
9270 case 0x0f: /* FRSQRTS */
9271 case 0x14: /* FCMGE (reg) */
9272 case 0x15: /* FACGE */
9273 case 0x1a: /* FABD */
9274 case 0x1c: /* FCMGT (reg) */
9275 case 0x1d: /* FACGT */
9276 break;
9277 default:
9278 unallocated_encoding(s);
9279 return;
9280 }
9281
9282 if (!dc_isar_feature(aa64_fp16, s)) {
9283 unallocated_encoding(s);
return;
9284 }
9285
9286 if (!fp_access_check(s)) {
9287 return;
9288 }
9289
9290 fpst = fpstatus_ptr(FPST_FPCR_F16);
9291
9292 tcg_op1 = read_fp_hreg(s, rn);
9293 tcg_op2 = read_fp_hreg(s, rm);
9294 tcg_res = tcg_temp_new_i32();
9295
9296 switch (fpopcode) {
9297 case 0x03: /* FMULX */
9298 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
9299 break;
9300 case 0x04: /* FCMEQ (reg) */
9301 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9302 break;
9303 case 0x07: /* FRECPS */
9304 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9305 break;
9306 case 0x0f: /* FRSQRTS */
9307 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9308 break;
9309 case 0x14: /* FCMGE (reg) */
9310 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9311 break;
9312 case 0x15: /* FACGE */
9313 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9314 break;
9315 case 0x1a: /* FABD */
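/* absolute value: subtract, then clear the f16 sign bit */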
9316 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
9317 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
9318 break;
9319 case 0x1c: /* FCMGT (reg) */
9320 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9321 break;
9322 case 0x1d: /* FACGT */
9323 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9324 break;
9325 default:
9326 g_assert_not_reached();
9327 }
9328
9329 write_fp_sreg(s, rd, tcg_res);
9330 }
9331
9332 /* AdvSIMD scalar three same extra
9333 * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
9334 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9335 * | 0 1 | U | 1 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd |
9336 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9337 */
9338 static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
9339 uint32_t insn)
9340 {
9341 int rd = extract32(insn, 0, 5);
9342 int rn = extract32(insn, 5, 5);
9343 int opcode = extract32(insn, 11, 4);
9344 int rm = extract32(insn, 16, 5);
9345 int size = extract32(insn, 22, 2);
9346 bool u = extract32(insn, 29, 1);
9347 TCGv_i32 ele1, ele2, ele3;
9348 TCGv_i64 res;
9349 bool feature;
9350
9351 switch (u * 16 + opcode) {
9352 case 0x10: /* SQRDMLAH (vector) */
9353 case 0x11: /* SQRDMLSH (vector) */
9354 if (size != 1 && size != 2) {
9355 unallocated_encoding(s);
9356 return;
9357 }
9358 feature = dc_isar_feature(aa64_rdm, s);
9359 break;
9360 default:
9361 unallocated_encoding(s);
9362 return;
9363 }
9364 if (!feature) {
9365 unallocated_encoding(s);
9366 return;
9367 }
9368 if (!fp_access_check(s)) {
9369 return;
9370 }
9371
9372 /* Do a single operation on the lowest element in the vector.
9373 * We use the standard Neon helpers and rely on 0 OP 0 == 0
9374 * with no side effects for all these operations.
9375 * OPTME: special-purpose helpers would avoid doing some
9376 * unnecessary work in the helper for the 16 bit cases.
9377 */
9378 ele1 = tcg_temp_new_i32();
9379 ele2 = tcg_temp_new_i32();
9380 ele3 = tcg_temp_new_i32();
9381
9382 read_vec_element_i32(s, ele1, rn, 0, size);
9383 read_vec_element_i32(s, ele2, rm, 0, size);
9384 read_vec_element_i32(s, ele3, rd, 0, size);
9385
9386 switch (opcode) {
9387 case 0x0: /* SQRDMLAH */
9388 if (size == 1) {
9389 gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
9390 } else {
9391 gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
9392 }
9393 break;
9394 case 0x1: /* SQRDMLSH */
9395 if (size == 1) {
9396 gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
9397 } else {
9398 gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
9399 }
9400 break;
9401 default:
9402 g_assert_not_reached();
9403 }
9404
9405 res = tcg_temp_new_i64();
9406 tcg_gen_extu_i32_i64(res, ele3);
9407 write_fp_dreg(s, rd, res);
9408 }
9409
9410 static void handle_2misc_64(DisasContext *s, int opcode, bool u,
9411 TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
9412 TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
9413 {
9414 /* Handle 64->64 opcodes which are shared between the scalar and
9415 * vector 2-reg-misc groups. We cover every integer opcode where size == 3
9416 * is valid in either group and also the double-precision fp ops.
9417 * The caller only need provide tcg_rmode and tcg_fpstatus if the op
9418 * requires them.
9419 */
9420 TCGCond cond;
9421
9422 switch (opcode) {
9423 case 0x4: /* CLS, CLZ */
9424 if (u) {
9425 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
9426 } else {
9427 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
9428 }
9429 break;
9430 case 0x5: /* NOT */
9431 /* This opcode is shared with CNT and RBIT but we have earlier
9432 * enforced that size == 3 if and only if this is the NOT insn.
9433 */
9434 tcg_gen_not_i64(tcg_rd, tcg_rn);
9435 break;
9436 case 0x7: /* SQABS, SQNEG */
9437 if (u) {
9438 gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
9439 } else {
9440 gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
9441 }
9442 break;
9443 case 0xa: /* CMLT */
9444 /* 64 bit integer comparison against zero, result is
9445 * test ? (2^64 - 1) : 0. We implement this using setcond (test)
9446 * and then negating.
9447 */
9448 cond = TCG_COND_LT;
9449 do_cmop:
9450 tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
9451 tcg_gen_neg_i64(tcg_rd, tcg_rd);
9452 break;
9453 case 0x8: /* CMGT, CMGE */
9454 cond = u ? TCG_COND_GE : TCG_COND_GT;
9455 goto do_cmop;
9456 case 0x9: /* CMEQ, CMLE */
9457 cond = u ? TCG_COND_LE : TCG_COND_EQ;
9458 goto do_cmop;
9459 case 0xb: /* ABS, NEG */
9460 if (u) {
9461 tcg_gen_neg_i64(tcg_rd, tcg_rn);
9462 } else {
9463 tcg_gen_abs_i64(tcg_rd, tcg_rn);
9464 }
9465 break;
9466 case 0x2f: /* FABS */
9467 gen_helper_vfp_absd(tcg_rd, tcg_rn);
9468 break;
9469 case 0x6f: /* FNEG */
9470 gen_helper_vfp_negd(tcg_rd, tcg_rn);
9471 break;
9472 case 0x7f: /* FSQRT */
9473 gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
9474 break;
9475 case 0x1a: /* FCVTNS */
9476 case 0x1b: /* FCVTMS */
9477 case 0x1c: /* FCVTAS */
9478 case 0x3a: /* FCVTPS */
9479 case 0x3b: /* FCVTZS */
9480 gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
9481 break;
9482 case 0x5a: /* FCVTNU */
9483 case 0x5b: /* FCVTMU */
9484 case 0x5c: /* FCVTAU */
9485 case 0x7a: /* FCVTPU */
9486 case 0x7b: /* FCVTZU */
9487 gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
9488 break;
9489 case 0x18: /* FRINTN */
9490 case 0x19: /* FRINTM */
9491 case 0x38: /* FRINTP */
9492 case 0x39: /* FRINTZ */
9493 case 0x58: /* FRINTA */
9494 case 0x79: /* FRINTI */
9495 gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
9496 break;
9497 case 0x59: /* FRINTX */
9498 gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
9499 break;
9500 case 0x1e: /* FRINT32Z */
9501 case 0x5e: /* FRINT32X */
9502 gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
9503 break;
9504 case 0x1f: /* FRINT64Z */
9505 case 0x5f: /* FRINT64X */
9506 gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
9507 break;
9508 default:
9509 g_assert_not_reached();
9510 }
9511 }
9512
9513 static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
9514 bool is_scalar, bool is_u, bool is_q,
9515 int size, int rn, int rd)
9516 {
9517 bool is_double = (size == MO_64);
9518 TCGv_ptr fpst;
9519
9520 if (!fp_access_check(s)) {
9521 return;
9522 }
9523
9524 fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
9525
9526 if (is_double) {
9527 TCGv_i64 tcg_op = tcg_temp_new_i64();
9528 TCGv_i64 tcg_zero = tcg_constant_i64(0);
9529 TCGv_i64 tcg_res = tcg_temp_new_i64();
9530 NeonGenTwoDoubleOpFn *genfn;
9531 bool swap = false;
9532 int pass;
9533
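/* FCMLT/FCMLE (zero) are implemented by swapping the operands of
 * FCMGT/FCMGE: "x < 0" is computed as "0 > x".
 */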
9534 switch (opcode) {
9535 case 0x2e: /* FCMLT (zero) */
9536 swap = true;
9537 /* fallthrough */
9538 case 0x2c: /* FCMGT (zero) */
9539 genfn = gen_helper_neon_cgt_f64;
9540 break;
9541 case 0x2d: /* FCMEQ (zero) */
9542 genfn = gen_helper_neon_ceq_f64;
9543 break;
9544 case 0x6d: /* FCMLE (zero) */
9545 swap = true;
9546 /* fall through */
9547 case 0x6c: /* FCMGE (zero) */
9548 genfn = gen_helper_neon_cge_f64;
9549 break;
9550 default:
9551 g_assert_not_reached();
9552 }
9553
9554 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9555 read_vec_element(s, tcg_op, rn, pass, MO_64);
9556 if (swap) {
9557 genfn(tcg_res, tcg_zero, tcg_op, fpst);
9558 } else {
9559 genfn(tcg_res, tcg_op, tcg_zero, fpst);
9560 }
9561 write_vec_element(s, tcg_res, rd, pass, MO_64);
9562 }
9563
9564 clear_vec_high(s, !is_scalar, rd);
9565 } else {
9566 TCGv_i32 tcg_op = tcg_temp_new_i32();
9567 TCGv_i32 tcg_zero = tcg_constant_i32(0);
9568 TCGv_i32 tcg_res = tcg_temp_new_i32();
9569 NeonGenTwoSingleOpFn *genfn;
9570 bool swap = false;
9571 int pass, maxpasses;
9572
9573 if (size == MO_16) {
9574 switch (opcode) {
9575 case 0x2e: /* FCMLT (zero) */
9576 swap = true;
9577 /* fall through */
9578 case 0x2c: /* FCMGT (zero) */
9579 genfn = gen_helper_advsimd_cgt_f16;
9580 break;
9581 case 0x2d: /* FCMEQ (zero) */
9582 genfn = gen_helper_advsimd_ceq_f16;
9583 break;
9584 case 0x6d: /* FCMLE (zero) */
9585 swap = true;
9586 /* fall through */
9587 case 0x6c: /* FCMGE (zero) */
9588 genfn = gen_helper_advsimd_cge_f16;
9589 break;
9590 default:
9591 g_assert_not_reached();
9592 }
9593 } else {
9594 switch (opcode) {
9595 case 0x2e: /* FCMLT (zero) */
9596 swap = true;
9597 /* fall through */
9598 case 0x2c: /* FCMGT (zero) */
9599 genfn = gen_helper_neon_cgt_f32;
9600 break;
9601 case 0x2d: /* FCMEQ (zero) */
9602 genfn = gen_helper_neon_ceq_f32;
9603 break;
9604 case 0x6d: /* FCMLE (zero) */
9605 swap = true;
9606 /* fall through */
9607 case 0x6c: /* FCMGE (zero) */
9608 genfn = gen_helper_neon_cge_f32;
9609 break;
9610 default:
9611 g_assert_not_reached();
9612 }
9613 }
9614
9615 if (is_scalar) {
9616 maxpasses = 1;
9617 } else {
9618 int vector_size = 8 << is_q;
9619 maxpasses = vector_size >> size;
9620 }
9621
9622 for (pass = 0; pass < maxpasses; pass++) {
9623 read_vec_element_i32(s, tcg_op, rn, pass, size);
9624 if (swap) {
9625 genfn(tcg_res, tcg_zero, tcg_op, fpst);
9626 } else {
9627 genfn(tcg_res, tcg_op, tcg_zero, fpst);
9628 }
9629 if (is_scalar) {
9630 write_fp_sreg(s, rd, tcg_res);
9631 } else {
9632 write_vec_element_i32(s, tcg_res, rd, pass, size);
9633 }
9634 }
9635
9636 if (!is_scalar) {
9637 clear_vec_high(s, is_q, rd);
9638 }
9639 }
9640 }
9641
9642 static void handle_2misc_reciprocal(DisasContext *s, int opcode,
9643 bool is_scalar, bool is_u, bool is_q,
9644 int size, int rn, int rd)
9645 {
9646 bool is_double = (size == 3);
9647 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
9648
9649 if (is_double) {
9650 TCGv_i64 tcg_op = tcg_temp_new_i64();
9651 TCGv_i64 tcg_res = tcg_temp_new_i64();
9652 int pass;
9653
9654 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9655 read_vec_element(s, tcg_op, rn, pass, MO_64);
9656 switch (opcode) {
9657 case 0x3d: /* FRECPE */
9658 gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
9659 break;
9660 case 0x3f: /* FRECPX */
9661 gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
9662 break;
9663 case 0x7d: /* FRSQRTE */
9664 gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
9665 break;
9666 default:
9667 g_assert_not_reached();
9668 }
9669 write_vec_element(s, tcg_res, rd, pass, MO_64);
9670 }
9671 clear_vec_high(s, !is_scalar, rd);
9672 } else {
9673 TCGv_i32 tcg_op = tcg_temp_new_i32();
9674 TCGv_i32 tcg_res = tcg_temp_new_i32();
9675 int pass, maxpasses;
9676
9677 if (is_scalar) {
9678 maxpasses = 1;
9679 } else {
9680 maxpasses = is_q ? 4 : 2;
9681 }
9682
9683 for (pass = 0; pass < maxpasses; pass++) {
9684 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
9685
9686 switch (opcode) {
9687 case 0x3c: /* URECPE */
9688 gen_helper_recpe_u32(tcg_res, tcg_op);
9689 break;
9690 case 0x3d: /* FRECPE */
9691 gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
9692 break;
9693 case 0x3f: /* FRECPX */
9694 gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
9695 break;
9696 case 0x7d: /* FRSQRTE */
9697 gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
9698 break;
9699 default:
9700 g_assert_not_reached();
9701 }
9702
9703 if (is_scalar) {
9704 write_fp_sreg(s, rd, tcg_res);
9705 } else {
9706 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9707 }
9708 }
9709 if (!is_scalar) {
9710 clear_vec_high(s, is_q, rd);
9711 }
9712 }
9713 }
9714
9715 static void handle_2misc_narrow(DisasContext *s, bool scalar,
9716 int opcode, bool u, bool is_q,
9717 int size, int rn, int rd)
9718 {
9719 /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
9720 * in the source becomes a size element in the destination).
9721 */
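/*
 * For example XTN Vd.2S, Vn.2D narrows two 64-bit elements into the low
 * half of Vd (destelt == 0), while XTN2 (is_q set) writes them into the
 * high half (destelt == 2).
 */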
9722 int pass;
9723 TCGv_i32 tcg_res[2];
9724 int destelt = is_q ? 2 : 0;
9725 int passes = scalar ? 1 : 2;
9726
9727 if (scalar) {
9728 tcg_res[1] = tcg_constant_i32(0);
9729 }
9730
9731 for (pass = 0; pass < passes; pass++) {
9732 TCGv_i64 tcg_op = tcg_temp_new_i64();
9733 NeonGenNarrowFn *genfn = NULL;
9734 NeonGenNarrowEnvFn *genenvfn = NULL;
9735
9736 if (scalar) {
9737 read_vec_element(s, tcg_op, rn, pass, size + 1);
9738 } else {
9739 read_vec_element(s, tcg_op, rn, pass, MO_64);
9740 }
9741 tcg_res[pass] = tcg_temp_new_i32();
9742
9743 switch (opcode) {
9744 case 0x12: /* XTN, SQXTUN */
9745 {
9746 static NeonGenNarrowFn * const xtnfns[3] = {
9747 gen_helper_neon_narrow_u8,
9748 gen_helper_neon_narrow_u16,
9749 tcg_gen_extrl_i64_i32,
9750 };
9751 static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
9752 gen_helper_neon_unarrow_sat8,
9753 gen_helper_neon_unarrow_sat16,
9754 gen_helper_neon_unarrow_sat32,
9755 };
9756 if (u) {
9757 genenvfn = sqxtunfns[size];
9758 } else {
9759 genfn = xtnfns[size];
9760 }
9761 break;
9762 }
9763 case 0x14: /* SQXTN, UQXTN */
9764 {
9765 static NeonGenNarrowEnvFn * const fns[3][2] = {
9766 { gen_helper_neon_narrow_sat_s8,
9767 gen_helper_neon_narrow_sat_u8 },
9768 { gen_helper_neon_narrow_sat_s16,
9769 gen_helper_neon_narrow_sat_u16 },
9770 { gen_helper_neon_narrow_sat_s32,
9771 gen_helper_neon_narrow_sat_u32 },
9772 };
9773 genenvfn = fns[size][u];
9774 break;
9775 }
9776 case 0x16: /* FCVTN, FCVTN2 */
9777 /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
9778 if (size == 2) {
9779 gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
9780 } else {
9781 TCGv_i32 tcg_lo = tcg_temp_new_i32();
9782 TCGv_i32 tcg_hi = tcg_temp_new_i32();
9783 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
9784 TCGv_i32 ahp = get_ahp_flag();
9785
9786 tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
9787 gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
9788 gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
9789 tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
9790 }
9791 break;
9792 case 0x36: /* BFCVTN, BFCVTN2 */
9793 {
9794 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
9795 gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
9796 }
9797 break;
9798 case 0x56: /* FCVTXN, FCVTXN2 */
9799 /* 64 bit to 32 bit float conversion
9800 * with von Neumann rounding (round to odd)
9801 */
9802 assert(size == 2);
9803 gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
9804 break;
9805 default:
9806 g_assert_not_reached();
9807 }
9808
9809 if (genfn) {
9810 genfn(tcg_res[pass], tcg_op);
9811 } else if (genenvfn) {
9812 genenvfn(tcg_res[pass], cpu_env, tcg_op);
9813 }
9814 }
9815
9816 for (pass = 0; pass < 2; pass++) {
9817 write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
9818 }
9819 clear_vec_high(s, is_q, rd);
9820 }
9821
9822 /* Remaining saturating accumulating ops */
9823 static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
9824 bool is_q, int size, int rn, int rd)
9825 {
9826 bool is_double = (size == 3);
9827
9828 if (is_double) {
9829 TCGv_i64 tcg_rn = tcg_temp_new_i64();
9830 TCGv_i64 tcg_rd = tcg_temp_new_i64();
9831 int pass;
9832
9833 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9834 read_vec_element(s, tcg_rn, rn, pass, MO_64);
9835 read_vec_element(s, tcg_rd, rd, pass, MO_64);
9836
9837 if (is_u) { /* USQADD */
9838 gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9839 } else { /* SUQADD */
9840 gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9841 }
9842 write_vec_element(s, tcg_rd, rd, pass, MO_64);
9843 }
9844 clear_vec_high(s, !is_scalar, rd);
9845 } else {
9846 TCGv_i32 tcg_rn = tcg_temp_new_i32();
9847 TCGv_i32 tcg_rd = tcg_temp_new_i32();
9848 int pass, maxpasses;
9849
9850 if (is_scalar) {
9851 maxpasses = 1;
9852 } else {
9853 maxpasses = is_q ? 4 : 2;
9854 }
9855
9856 for (pass = 0; pass < maxpasses; pass++) {
9857 if (is_scalar) {
9858 read_vec_element_i32(s, tcg_rn, rn, pass, size);
9859 read_vec_element_i32(s, tcg_rd, rd, pass, size);
9860 } else {
9861 read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
9862 read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
9863 }
9864
9865 if (is_u) { /* USQADD */
9866 switch (size) {
9867 case 0:
9868 gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9869 break;
9870 case 1:
9871 gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9872 break;
9873 case 2:
9874 gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9875 break;
9876 default:
9877 g_assert_not_reached();
9878 }
9879 } else { /* SUQADD */
9880 switch (size) {
9881 case 0:
9882 gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9883 break;
9884 case 1:
9885 gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9886 break;
9887 case 2:
9888 gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9889 break;
9890 default:
9891 g_assert_not_reached();
9892 }
9893 }
9894
9895 if (is_scalar) {
9896 write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64);
9897 }
9898 write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
9899 }
9900 clear_vec_high(s, is_q, rd);
9901 }
9902 }
9903
9904 /* AdvSIMD scalar two reg misc
9905 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
9906 * +-----+---+-----------+------+-----------+--------+-----+------+------+
9907 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
9908 * +-----+---+-----------+------+-----------+--------+-----+------+------+
9909 */
9910 static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
9911 {
9912 int rd = extract32(insn, 0, 5);
9913 int rn = extract32(insn, 5, 5);
9914 int opcode = extract32(insn, 12, 5);
9915 int size = extract32(insn, 22, 2);
9916 bool u = extract32(insn, 29, 1);
9917 bool is_fcvt = false;
9918 int rmode;
9919 TCGv_i32 tcg_rmode;
9920 TCGv_ptr tcg_fpstatus;
9921
9922 switch (opcode) {
9923 case 0x3: /* USQADD / SUQADD */
9924 if (!fp_access_check(s)) {
9925 return;
9926 }
9927 handle_2misc_satacc(s, true, u, false, size, rn, rd);
9928 return;
9929 case 0x7: /* SQABS / SQNEG */
9930 break;
9931 case 0xa: /* CMLT */
9932 if (u) {
9933 unallocated_encoding(s);
9934 return;
9935 }
9936 /* fall through */
9937 case 0x8: /* CMGT, CMGE */
9938 case 0x9: /* CMEQ, CMLE */
9939 case 0xb: /* ABS, NEG */
9940 if (size != 3) {
9941 unallocated_encoding(s);
9942 return;
9943 }
9944 break;
9945 case 0x12: /* SQXTUN */
9946 if (!u) {
9947 unallocated_encoding(s);
9948 return;
9949 }
9950 /* fall through */
9951 case 0x14: /* SQXTN, UQXTN */
9952 if (size == 3) {
9953 unallocated_encoding(s);
9954 return;
9955 }
9956 if (!fp_access_check(s)) {
9957 return;
9958 }
9959 handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
9960 return;
9961 case 0xc ... 0xf:
9962 case 0x16 ... 0x1d:
9963 case 0x1f:
9964 /* Floating point: U, size[1] and opcode indicate operation;
9965 * size[0] indicates single or double precision.
9966 */
9967 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
9968 size = extract32(size, 0, 1) ? 3 : 2;
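/*
 * Worked example: scalar FCVTZS Dd, Dn has U=0, size=0b11, opcode=0x1b;
 * the remapping above yields opcode 0x3b and size 3 (double precision),
 * matching the FCVTZS case below.
 */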
9969 switch (opcode) {
9970 case 0x2c: /* FCMGT (zero) */
9971 case 0x2d: /* FCMEQ (zero) */
9972 case 0x2e: /* FCMLT (zero) */
9973 case 0x6c: /* FCMGE (zero) */
9974 case 0x6d: /* FCMLE (zero) */
9975 handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
9976 return;
9977 case 0x1d: /* SCVTF */
9978 case 0x5d: /* UCVTF */
9979 {
9980 bool is_signed = (opcode == 0x1d);
9981 if (!fp_access_check(s)) {
9982 return;
9983 }
9984 handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
9985 return;
9986 }
9987 case 0x3d: /* FRECPE */
9988 case 0x3f: /* FRECPX */
9989 case 0x7d: /* FRSQRTE */
9990 if (!fp_access_check(s)) {
9991 return;
9992 }
9993 handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
9994 return;
9995 case 0x1a: /* FCVTNS */
9996 case 0x1b: /* FCVTMS */
9997 case 0x3a: /* FCVTPS */
9998 case 0x3b: /* FCVTZS */
9999 case 0x5a: /* FCVTNU */
10000 case 0x5b: /* FCVTMU */
10001 case 0x7a: /* FCVTPU */
10002 case 0x7b: /* FCVTZU */
10003 is_fcvt = true;
10004 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
10005 break;
10006 case 0x1c: /* FCVTAS */
10007 case 0x5c: /* FCVTAU */
10008 /* TIEAWAY doesn't fit in the usual rounding mode encoding */
10009 is_fcvt = true;
10010 rmode = FPROUNDING_TIEAWAY;
10011 break;
10012 case 0x56: /* FCVTXN, FCVTXN2 */
10013 if (size == 2) {
10014 unallocated_encoding(s);
10015 return;
10016 }
10017 if (!fp_access_check(s)) {
10018 return;
10019 }
10020 handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
10021 return;
10022 default:
10023 unallocated_encoding(s);
10024 return;
10025 }
10026 break;
10027 default:
10028 unallocated_encoding(s);
10029 return;
10030 }
10031
10032 if (!fp_access_check(s)) {
10033 return;
10034 }
10035
10036 if (is_fcvt) {
10037 tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
10038 tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
10039 } else {
10040 tcg_fpstatus = NULL;
10041 tcg_rmode = NULL;
10042 }
10043
10044 if (size == 3) {
10045 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
10046 TCGv_i64 tcg_rd = tcg_temp_new_i64();
10047
10048 handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
10049 write_fp_dreg(s, rd, tcg_rd);
10050 } else {
10051 TCGv_i32 tcg_rn = tcg_temp_new_i32();
10052 TCGv_i32 tcg_rd = tcg_temp_new_i32();
10053
10054 read_vec_element_i32(s, tcg_rn, rn, 0, size);
10055
10056 switch (opcode) {
10057 case 0x7: /* SQABS, SQNEG */
10058 {
10059 NeonGenOneOpEnvFn *genfn;
10060 static NeonGenOneOpEnvFn * const fns[3][2] = {
10061 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
10062 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
10063 { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
10064 };
10065 genfn = fns[size][u];
10066 genfn(tcg_rd, cpu_env, tcg_rn);
10067 break;
10068 }
10069 case 0x1a: /* FCVTNS */
10070 case 0x1b: /* FCVTMS */
10071 case 0x1c: /* FCVTAS */
10072 case 0x3a: /* FCVTPS */
10073 case 0x3b: /* FCVTZS */
10074 gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0),
10075 tcg_fpstatus);
10076 break;
10077 case 0x5a: /* FCVTNU */
10078 case 0x5b: /* FCVTMU */
10079 case 0x5c: /* FCVTAU */
10080 case 0x7a: /* FCVTPU */
10081 case 0x7b: /* FCVTZU */
10082 gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0),
10083 tcg_fpstatus);
10084 break;
10085 default:
10086 g_assert_not_reached();
10087 }
10088
10089 write_fp_sreg(s, rd, tcg_rd);
10090 }
10091
10092 if (is_fcvt) {
10093 gen_restore_rmode(tcg_rmode, tcg_fpstatus);
10094 }
10095 }
10096
10097 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
10098 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
10099 int immh, int immb, int opcode, int rn, int rd)
10100 {
10101 int size = 32 - clz32(immh) - 1;
10102 int immhb = immh << 3 | immb;
10103 int shift = 2 * (8 << size) - immhb;
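/*
 * Example: immh:immb = 0001:101 gives size 0 (byte elements), immhb 13
 * and a right shift of 16 - 13 = 3.
 */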
10104 GVecGen2iFn *gvec_fn;
10105
10106 if (extract32(immh, 3, 1) && !is_q) {
10107 unallocated_encoding(s);
10108 return;
10109 }
10110 tcg_debug_assert(size <= 3);
10111
10112 if (!fp_access_check(s)) {
10113 return;
10114 }
10115
10116 switch (opcode) {
10117 case 0x02: /* SSRA / USRA (accumulate) */
10118 gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
10119 break;
10120
10121 case 0x08: /* SRI */
10122 gvec_fn = gen_gvec_sri;
10123 break;
10124
10125 case 0x00: /* SSHR / USHR */
10126 if (is_u) {
10127 if (shift == 8 << size) {
10128 /* Shift count the same size as element size produces zero. */
10129 tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
10130 is_q ? 16 : 8, vec_full_reg_size(s), 0);
10131 return;
10132 }
10133 gvec_fn = tcg_gen_gvec_shri;
10134 } else {
10135 /* Shift count the same size as element size produces all sign. */
10136 if (shift == 8 << size) {
10137 shift -= 1;
10138 }
10139 gvec_fn = tcg_gen_gvec_sari;
10140 }
10141 break;
10142
10143 case 0x04: /* SRSHR / URSHR (rounding) */
10144 gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
10145 break;
10146
10147 case 0x06: /* SRSRA / URSRA (accum + rounding) */
10148 gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
10149 break;
10150
10151 default:
10152 g_assert_not_reached();
10153 }
10154
10155 gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
10156 }
10157
10158 /* SHL/SLI - Vector shift left */
10159 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
10160 int immh, int immb, int opcode, int rn, int rd)
10161 {
10162 int size = 32 - clz32(immh) - 1;
10163 int immhb = immh << 3 | immb;
10164 int shift = immhb - (8 << size);
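/*
 * Example: immh:immb = 0010:011 gives size 1 (16-bit elements), immhb 19
 * and a left shift of 19 - 16 = 3.
 */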
10165
10166 /* Range of size is limited by decode: immh is a non-zero 4 bit field */
10167 assert(size >= 0 && size <= 3);
10168
10169 if (extract32(immh, 3, 1) && !is_q) {
10170 unallocated_encoding(s);
10171 return;
10172 }
10173
10174 if (!fp_access_check(s)) {
10175 return;
10176 }
10177
10178 if (insert) {
10179 gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
10180 } else {
10181 gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
10182 }
10183 }
10184
10185 /* USHLL/SHLL - Vector shift left with widening */
10186 static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
10187 int immh, int immb, int opcode, int rn, int rd)
10188 {
10189 int size = 32 - clz32(immh) - 1;
10190 int immhb = immh << 3 | immb;
10191 int shift = immhb - (8 << size);
10192 int dsize = 64;
10193 int esize = 8 << size;
10194 int elements = dsize/esize;
10195 TCGv_i64 tcg_rn = tcg_temp_new_i64();
10196 TCGv_i64 tcg_rd = tcg_temp_new_i64();
10197 int i;
10198
10199 if (size >= 3) {
10200 unallocated_encoding(s);
10201 return;
10202 }
10203
10204 if (!fp_access_check(s)) {
10205 return;
10206 }
10207
10208 /* For the LL variants the store is larger than the load,
10209 * so if rd == rn we would overwrite parts of our input.
10210 * So load everything right now and use shifts in the main loop.
10211 */
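/*
 * A shift of zero here is the UXTL/SXTL alias of USHLL/SSHLL.  Each
 * element is extracted with a right shift, sign- or zero-extended via
 * ext_and_shift_reg() (option is the element size, with bit 2 set for
 * the signed SSHLL case) and then shifted left into place.
 */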
10212 read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
10213
10214 for (i = 0; i < elements; i++) {
10215 tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
10216 ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
10217 tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
10218 write_vec_element(s, tcg_rd, rd, i, size + 1);
10219 }
10220 }
10221
10222 /* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
10223 static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
10224 int immh, int immb, int opcode, int rn, int rd)
10225 {
10226 int immhb = immh << 3 | immb;
10227 int size = 32 - clz32(immh) - 1;
10228 int dsize = 64;
10229 int esize = 8 << size;
10230 int elements = dsize/esize;
10231 int shift = (2 * esize) - immhb;
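/*
 * Example: immh:immb = 0001:110 narrows 16-bit source elements to bytes
 * with shift 16 - 14 = 2.
 */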
10232 bool round = extract32(opcode, 0, 1);
10233 TCGv_i64 tcg_rn, tcg_rd, tcg_final;
10234 TCGv_i64 tcg_round;
10235 int i;
10236
10237 if (extract32(immh, 3, 1)) {
10238 unallocated_encoding(s);
10239 return;
10240 }
10241
10242 if (!fp_access_check(s)) {
10243 return;
10244 }
10245
10246 tcg_rn = tcg_temp_new_i64();
10247 tcg_rd = tcg_temp_new_i64();
10248 tcg_final = tcg_temp_new_i64();
10249 read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
10250
10251 if (round) {
10252 tcg_round = tcg_constant_i64(1ULL << (shift - 1));
10253 } else {
10254 tcg_round = NULL;
10255 }
10256
10257 for (i = 0; i < elements; i++) {
10258 read_vec_element(s, tcg_rn, rn, i, size+1);
10259 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
10260 false, true, size+1, shift);
10261
10262 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
10263 }
10264
10265 if (!is_q) {
10266 write_vec_element(s, tcg_final, rd, 0, MO_64);
10267 } else {
10268 write_vec_element(s, tcg_final, rd, 1, MO_64);
10269 }
10270
10271 clear_vec_high(s, is_q, rd);
10272 }
10273
10274
10275 /* AdvSIMD shift by immediate
10276 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
10277 * +---+---+---+-------------+------+------+--------+---+------+------+
10278 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
10279 * +---+---+---+-------------+------+------+--------+---+------+------+
10280 */
10281 static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
10282 {
10283 int rd = extract32(insn, 0, 5);
10284 int rn = extract32(insn, 5, 5);
10285 int opcode = extract32(insn, 11, 5);
10286 int immb = extract32(insn, 16, 3);
10287 int immh = extract32(insn, 19, 4);
10288 bool is_u = extract32(insn, 29, 1);
10289 bool is_q = extract32(insn, 30, 1);
10290
10291 /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
10292 assert(immh != 0);
10293
10294 switch (opcode) {
10295 case 0x08: /* SRI */
10296 if (!is_u) {
10297 unallocated_encoding(s);
10298 return;
10299 }
10300 /* fall through */
10301 case 0x00: /* SSHR / USHR */
10302 case 0x02: /* SSRA / USRA (accumulate) */
10303 case 0x04: /* SRSHR / URSHR (rounding) */
10304 case 0x06: /* SRSRA / URSRA (accum + rounding) */
10305 handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
10306 break;
10307 case 0x0a: /* SHL / SLI */
10308 handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
10309 break;
10310 case 0x10: /* SHRN / SQSHRUN */
10311 case 0x11: /* RSHRN / SQRSHRUN */
10312 if (is_u) {
10313 handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
10314 opcode, rn, rd);
10315 } else {
10316 handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
10317 }
10318 break;
10319 case 0x12: /* SQSHRN / UQSHRN */
10320 case 0x13: /* SQRSHRN / UQRSHRN */
10321 handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
10322 opcode, rn, rd);
10323 break;
10324 case 0x14: /* SSHLL / USHLL */
10325 handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
10326 break;
10327 case 0x1c: /* SCVTF / UCVTF */
10328 handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
10329 opcode, rn, rd);
10330 break;
10331 case 0xc: /* SQSHLU */
10332 if (!is_u) {
10333 unallocated_encoding(s);
10334 return;
10335 }
10336 handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
10337 break;
10338 case 0xe: /* SQSHL, UQSHL */
10339 handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
10340 break;
10341 case 0x1f: /* FCVTZS/ FCVTZU */
10342 handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
10343 return;
10344 default:
10345 unallocated_encoding(s);
10346 return;
10347 }
10348 }
10349
10350 /* Generate code to do a "long" addition or subtraction, ie one done in
10351 * TCGv_i64 on vector lanes twice the width specified by size.
10352 */
10353 static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
10354 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
10355 {
10356 static NeonGenTwo64OpFn * const fns[3][2] = {
10357 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
10358 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
10359 { tcg_gen_add_i64, tcg_gen_sub_i64 },
10360 };
10361 NeonGenTwo64OpFn *genfn;
10362 assert(size < 3);
10363
10364 genfn = fns[size][is_sub];
10365 genfn(tcg_res, tcg_op1, tcg_op2);
10366 }
10367
10368 static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
10369 int opcode, int rd, int rn, int rm)
10370 {
10371 /* 3-reg-different widening insns: 64 x 64 -> 128 */
10372 TCGv_i64 tcg_res[2];
10373 int pass, accop;
10374
10375 tcg_res[0] = tcg_temp_new_i64();
10376 tcg_res[1] = tcg_temp_new_i64();
10377
10378 /* Does this op do an adding accumulate, a subtracting accumulate,
10379 * or no accumulate at all?
10380 */
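/* SABAL/UABAL, SMLAL/UMLAL and SQDMLAL add into Vd; SMLSL/UMLSL and
 * SQDMLSL subtract from it; everything else simply overwrites Vd.
 */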
10381 switch (opcode) {
10382 case 5:
10383 case 8:
10384 case 9:
10385 accop = 1;
10386 break;
10387 case 10:
10388 case 11:
10389 accop = -1;
10390 break;
10391 default:
10392 accop = 0;
10393 break;
10394 }
10395
10396 if (accop != 0) {
10397 read_vec_element(s, tcg_res[0], rd, 0, MO_64);
10398 read_vec_element(s, tcg_res[1], rd, 1, MO_64);
10399 }
10400
10401 /* size == 2 means two 32x32->64 operations; this is worth special
10402 * casing because we can generally handle it inline.
10403 */
10404 if (size == 2) {
10405 for (pass = 0; pass < 2; pass++) {
10406 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10407 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10408 TCGv_i64 tcg_passres;
10409 MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
10410
10411 int elt = pass + is_q * 2;
10412
10413 read_vec_element(s, tcg_op1, rn, elt, memop);
10414 read_vec_element(s, tcg_op2, rm, elt, memop);
10415
10416 if (accop == 0) {
10417 tcg_passres = tcg_res[pass];
10418 } else {
10419 tcg_passres = tcg_temp_new_i64();
10420 }
10421
10422 switch (opcode) {
10423 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10424 tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
10425 break;
10426 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10427 tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
10428 break;
10429 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10430 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10431 {
10432 TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
10433 TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();
10434
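/* Absolute difference: compute op1 - op2 and op2 - op1 and pick
 * the non-negative one with a conditional move.
 */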
10435 tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
10436 tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
10437 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
10438 tcg_passres,
10439 tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
10440 break;
10441 }
10442 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10443 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10444 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10445 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10446 break;
10447 case 9: /* SQDMLAL, SQDMLAL2 */
10448 case 11: /* SQDMLSL, SQDMLSL2 */
10449 case 13: /* SQDMULL, SQDMULL2 */
10450 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10451 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
10452 tcg_passres, tcg_passres);
10453 break;
10454 default:
10455 g_assert_not_reached();
10456 }
10457
10458 if (opcode == 9 || opcode == 11) {
10459 /* saturating accumulate ops */
10460 if (accop < 0) {
10461 tcg_gen_neg_i64(tcg_passres, tcg_passres);
10462 }
10463 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
10464 tcg_res[pass], tcg_passres);
10465 } else if (accop > 0) {
10466 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10467 } else if (accop < 0) {
10468 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10469 }
10470 }
10471 } else {
10472 /* size 0 or 1, generally helper functions */
10473 for (pass = 0; pass < 2; pass++) {
10474 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10475 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10476 TCGv_i64 tcg_passres;
10477 int elt = pass + is_q * 2;
10478
10479 read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
10480 read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
10481
10482 if (accop == 0) {
10483 tcg_passres = tcg_res[pass];
10484 } else {
10485 tcg_passres = tcg_temp_new_i64();
10486 }
10487
10488 switch (opcode) {
10489 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10490 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10491 {
10492 TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
10493 static NeonGenWidenFn * const widenfns[2][2] = {
10494 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10495 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10496 };
10497 NeonGenWidenFn *widenfn = widenfns[size][is_u];
10498
10499 widenfn(tcg_op2_64, tcg_op2);
10500 widenfn(tcg_passres, tcg_op1);
10501 gen_neon_addl(size, (opcode == 2), tcg_passres,
10502 tcg_passres, tcg_op2_64);
10503 break;
10504 }
10505 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10506 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10507 if (size == 0) {
10508 if (is_u) {
10509 gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
10510 } else {
10511 gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
10512 }
10513 } else {
10514 if (is_u) {
10515 gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
10516 } else {
10517 gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
10518 }
10519 }
10520 break;
10521 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10522 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10523 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10524 if (size == 0) {
10525 if (is_u) {
10526 gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
10527 } else {
10528 gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
10529 }
10530 } else {
10531 if (is_u) {
10532 gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
10533 } else {
10534 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10535 }
10536 }
10537 break;
10538 case 9: /* SQDMLAL, SQDMLAL2 */
10539 case 11: /* SQDMLSL, SQDMLSL2 */
10540 case 13: /* SQDMULL, SQDMULL2 */
10541 assert(size == 1);
10542 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10543 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
10544 tcg_passres, tcg_passres);
10545 break;
10546 default:
10547 g_assert_not_reached();
10548 }
10549
10550 if (accop != 0) {
10551 if (opcode == 9 || opcode == 11) {
10552 /* saturating accumulate ops */
10553 if (accop < 0) {
10554 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
10555 }
10556 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
10557 tcg_res[pass],
10558 tcg_passres);
10559 } else {
10560 gen_neon_addl(size, (accop < 0), tcg_res[pass],
10561 tcg_res[pass], tcg_passres);
10562 }
10563 }
10564 }
10565 }
10566
10567 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
10568 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
10569 }
10570
10571 static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
10572 int opcode, int rd, int rn, int rm)
10573 {
10574 TCGv_i64 tcg_res[2];
10575 int part = is_q ? 2 : 0;
10576 int pass;
10577
10578 for (pass = 0; pass < 2; pass++) {
10579 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10580 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10581 TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
10582 static NeonGenWidenFn * const widenfns[3][2] = {
10583 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10584 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10585 { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
10586 };
10587 NeonGenWidenFn *widenfn = widenfns[size][is_u];
10588
10589 read_vec_element(s, tcg_op1, rn, pass, MO_64);
10590 read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
10591 widenfn(tcg_op2_wide, tcg_op2);
10592 tcg_res[pass] = tcg_temp_new_i64();
10593 gen_neon_addl(size, (opcode == 3),
10594 tcg_res[pass], tcg_op1, tcg_op2_wide);
10595 }
10596
10597 for (pass = 0; pass < 2; pass++) {
10598 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10599 }
10600 }
10601
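/*
 * Narrow to the high 32 bits with rounding: adding 1 << 31 first means
 * the discarded low word rounds the result to nearest.
 */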
10602 static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
10603 {
10604 tcg_gen_addi_i64(in, in, 1U << 31);
10605 tcg_gen_extrh_i64_i32(res, in);
10606 }
10607
10608 static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
10609 int opcode, int rd, int rn, int rm)
10610 {
10611 TCGv_i32 tcg_res[2];
10612 int part = is_q ? 2 : 0;
10613 int pass;
10614
10615 for (pass = 0; pass < 2; pass++) {
10616 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10617 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10618 TCGv_i64 tcg_wideres = tcg_temp_new_i64();
10619 static NeonGenNarrowFn * const narrowfns[3][2] = {
10620 { gen_helper_neon_narrow_high_u8,
10621 gen_helper_neon_narrow_round_high_u8 },
10622 { gen_helper_neon_narrow_high_u16,
10623 gen_helper_neon_narrow_round_high_u16 },
10624 { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
10625 };
10626 NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
10627
10628 read_vec_element(s, tcg_op1, rn, pass, MO_64);
10629 read_vec_element(s, tcg_op2, rm, pass, MO_64);
10630
10631 gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
10632
10633 tcg_res[pass] = tcg_temp_new_i32();
10634 gennarrow(tcg_res[pass], tcg_wideres);
10635 }
10636
10637 for (pass = 0; pass < 2; pass++) {
10638 write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
10639 }
10640 clear_vec_high(s, is_q, rd);
10641 }
10642
10643 /* AdvSIMD three different
10644 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
10645 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10646 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
10647 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10648 */
10649 static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
10650 {
10651 /* Instructions in this group fall into three basic classes
10652 * (in each case with the operation working on each element in
10653 * the input vectors):
10654 * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
10655 * 128 bit input)
10656 * (2) wide 64 x 128 -> 128
10657 * (3) narrowing 128 x 128 -> 64
10658 * Here we do initial decode, catch unallocated cases and
10659 * dispatch to separate functions for each class.
10660 */
10661 int is_q = extract32(insn, 30, 1);
10662 int is_u = extract32(insn, 29, 1);
10663 int size = extract32(insn, 22, 2);
10664 int opcode = extract32(insn, 12, 4);
10665 int rm = extract32(insn, 16, 5);
10666 int rn = extract32(insn, 5, 5);
10667 int rd = extract32(insn, 0, 5);
10668
10669 switch (opcode) {
10670 case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
10671 case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
10672 /* 64 x 128 -> 128 */
10673 if (size == 3) {
10674 unallocated_encoding(s);
10675 return;
10676 }
10677 if (!fp_access_check(s)) {
10678 return;
10679 }
10680 handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
10681 break;
10682 case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
10683 case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
10684 /* 128 x 128 -> 64 */
10685 if (size == 3) {
10686 unallocated_encoding(s);
10687 return;
10688 }
10689 if (!fp_access_check(s)) {
10690 return;
10691 }
10692 handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
10693 break;
10694 case 14: /* PMULL, PMULL2 */
10695 if (is_u) {
10696 unallocated_encoding(s);
10697 return;
10698 }
10699 switch (size) {
10700 case 0: /* PMULL.P8 */
10701 if (!fp_access_check(s)) {
10702 return;
10703 }
10704 /* The Q field specifies lo/hi half input for this insn. */
10705 gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
10706 gen_helper_neon_pmull_h);
10707 break;
10708
10709 case 3: /* PMULL.P64 */
10710 if (!dc_isar_feature(aa64_pmull, s)) {
10711 unallocated_encoding(s);
10712 return;
10713 }
10714 if (!fp_access_check(s)) {
10715 return;
10716 }
10717 /* The Q field specifies lo/hi half input for this insn. */
10718 gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
10719 gen_helper_gvec_pmull_q);
10720 break;
10721
10722 default:
10723 unallocated_encoding(s);
10724 break;
10725 }
10726 return;
10727 case 9: /* SQDMLAL, SQDMLAL2 */
10728 case 11: /* SQDMLSL, SQDMLSL2 */
10729 case 13: /* SQDMULL, SQDMULL2 */
10730 if (is_u || size == 0) {
10731 unallocated_encoding(s);
10732 return;
10733 }
10734 /* fall through */
10735 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10736 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10737 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10738 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10739 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10740 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10741 case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
10742 /* 64 x 64 -> 128 */
10743 if (size == 3) {
10744 unallocated_encoding(s);
10745 return;
10746 }
10747 if (!fp_access_check(s)) {
10748 return;
10749 }
10750
10751 handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
10752 break;
10753 default:
10754 /* opcode 15 not allocated */
10755 unallocated_encoding(s);
10756 break;
10757 }
10758 }
10759
10760 /* Logic op (opcode == 3) subgroup of C3.6.16. */
10761 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
10762 {
10763 int rd = extract32(insn, 0, 5);
10764 int rn = extract32(insn, 5, 5);
10765 int rm = extract32(insn, 16, 5);
10766 int size = extract32(insn, 22, 2);
10767 bool is_u = extract32(insn, 29, 1);
10768 bool is_q = extract32(insn, 30, 1);
10769
10770 if (!fp_access_check(s)) {
10771 return;
10772 }
10773
10774 switch (size + 4 * is_u) {
10775 case 0: /* AND */
10776 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
10777 return;
10778 case 1: /* BIC */
10779 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
10780 return;
10781 case 2: /* ORR */
10782 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
10783 return;
10784 case 3: /* ORN */
10785 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
10786 return;
10787 case 4: /* EOR */
10788 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
10789 return;
10790
10791 case 5: /* BSL bitwise select */
10792 gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
10793 return;
10794 case 6: /* BIT, bitwise insert if true */
10795 gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
10796 return;
10797 case 7: /* BIF, bitwise insert if false */
10798 gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
10799 return;
10800
10801 default:
10802 g_assert_not_reached();
10803 }
10804 }
10805
10806 /* Pairwise op subgroup of C3.6.16.
10807 *
10808 * This is called directly for the integer pairwise ops and via
10809 * disas_simd_3same_float for the float pairwise operations, where the opcode and size are calculated differently.
10810 */
10811 static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
10812 int size, int rn, int rm, int rd)
10813 {
10814 TCGv_ptr fpst;
10815 int pass;
10816
10817 /* Floating point operations need fpst */
10818 if (opcode >= 0x58) {
10819 fpst = fpstatus_ptr(FPST_FPCR);
10820 } else {
10821 fpst = NULL;
10822 }
10823
10824 if (!fp_access_check(s)) {
10825 return;
10826 }
10827
10828 /* These operations work on the concatenated rm:rn, with each pair of
10829 * adjacent elements being operated on to produce an element in the result.
10830 */
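/*
 * Example: for ADDP.4S the low two result elements are the pairwise
 * sums of Vn's elements and the high two are the pairwise sums of Vm's.
 */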
10831 if (size == 3) {
10832 TCGv_i64 tcg_res[2];
10833
10834 for (pass = 0; pass < 2; pass++) {
10835 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10836 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10837 int passreg = (pass == 0) ? rn : rm;
10838
10839 read_vec_element(s, tcg_op1, passreg, 0, MO_64);
10840 read_vec_element(s, tcg_op2, passreg, 1, MO_64);
10841 tcg_res[pass] = tcg_temp_new_i64();
10842
10843 switch (opcode) {
10844 case 0x17: /* ADDP */
10845 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
10846 break;
10847 case 0x58: /* FMAXNMP */
10848 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10849 break;
10850 case 0x5a: /* FADDP */
10851 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10852 break;
10853 case 0x5e: /* FMAXP */
10854 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10855 break;
10856 case 0x78: /* FMINNMP */
10857 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10858 break;
10859 case 0x7e: /* FMINP */
10860 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10861 break;
10862 default:
10863 g_assert_not_reached();
10864 }
10865 }
10866
10867 for (pass = 0; pass < 2; pass++) {
10868 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10869 }
10870 } else {
10871 int maxpass = is_q ? 4 : 2;
10872 TCGv_i32 tcg_res[4];
10873
10874 for (pass = 0; pass < maxpass; pass++) {
10875 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10876 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10877 NeonGenTwoOpFn *genfn = NULL;
10878 int passreg = pass < (maxpass / 2) ? rn : rm;
10879 int passelt = (is_q && (pass & 1)) ? 2 : 0;
10880
10881 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
10882 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
10883 tcg_res[pass] = tcg_temp_new_i32();
10884
10885 switch (opcode) {
10886 case 0x17: /* ADDP */
10887 {
10888 static NeonGenTwoOpFn * const fns[3] = {
10889 gen_helper_neon_padd_u8,
10890 gen_helper_neon_padd_u16,
10891 tcg_gen_add_i32,
10892 };
10893 genfn = fns[size];
10894 break;
10895 }
10896 case 0x14: /* SMAXP, UMAXP */
10897 {
10898 static NeonGenTwoOpFn * const fns[3][2] = {
10899 { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
10900 { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
10901 { tcg_gen_smax_i32, tcg_gen_umax_i32 },
10902 };
10903 genfn = fns[size][u];
10904 break;
10905 }
10906 case 0x15: /* SMINP, UMINP */
10907 {
10908 static NeonGenTwoOpFn * const fns[3][2] = {
10909 { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
10910 { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
10911 { tcg_gen_smin_i32, tcg_gen_umin_i32 },
10912 };
10913 genfn = fns[size][u];
10914 break;
10915 }
10916 /* The FP operations are all on single floats (32 bit) */
10917 case 0x58: /* FMAXNMP */
10918 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10919 break;
10920 case 0x5a: /* FADDP */
10921 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10922 break;
10923 case 0x5e: /* FMAXP */
10924 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10925 break;
10926 case 0x78: /* FMINNMP */
10927 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10928 break;
10929 case 0x7e: /* FMINP */
10930 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10931 break;
10932 default:
10933 g_assert_not_reached();
10934 }
10935
10936 /* FP ops wrote their results directly above; integer ops set genfn, so call it now */
10937 if (genfn) {
10938 genfn(tcg_res[pass], tcg_op1, tcg_op2);
10939 }
10940 }
10941
10942 for (pass = 0; pass < maxpass; pass++) {
10943 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
10944 }
10945 clear_vec_high(s, is_q, rd);
10946 }
10947 }
10948
10949 /* Floating point op subgroup of C3.6.16. */
10950 static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
10951 {
10952 /* For floating point ops, the U, size[1] and opcode bits
10953 * together indicate the operation. size[0] indicates single
10954 * or double.
10955 */
10956 int fpopcode = extract32(insn, 11, 5)
10957 | (extract32(insn, 23, 1) << 5)
10958 | (extract32(insn, 29, 1) << 6);
10959 int is_q = extract32(insn, 30, 1);
10960 int size = extract32(insn, 22, 1);
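/* Only the sz bit is extracted here, so size is 0 for single and
 * 1 for double precision.
 */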
10961 int rm = extract32(insn, 16, 5);
10962 int rn = extract32(insn, 5, 5);
10963 int rd = extract32(insn, 0, 5);
10964
10965 int datasize = is_q ? 128 : 64;
10966 int esize = 32 << size;
10967 int elements = datasize / esize;
10968
10969 if (size == 1 && !is_q) {
10970 unallocated_encoding(s);
10971 return;
10972 }
10973
10974 switch (fpopcode) {
10975 case 0x58: /* FMAXNMP */
10976 case 0x5a: /* FADDP */
10977 case 0x5e: /* FMAXP */
10978 case 0x78: /* FMINNMP */
10979 case 0x7e: /* FMINP */
10980 if (size && !is_q) {
10981 unallocated_encoding(s);
10982 return;
10983 }
10984 handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
10985 rn, rm, rd);
10986 return;
10987 case 0x1b: /* FMULX */
10988 case 0x1f: /* FRECPS */
10989 case 0x3f: /* FRSQRTS */
10990 case 0x5d: /* FACGE */
10991 case 0x7d: /* FACGT */
10992 case 0x19: /* FMLA */
10993 case 0x39: /* FMLS */
10994 case 0x18: /* FMAXNM */
10995 case 0x1a: /* FADD */
10996 case 0x1c: /* FCMEQ */
10997 case 0x1e: /* FMAX */
10998 case 0x38: /* FMINNM */
10999 case 0x3a: /* FSUB */
11000 case 0x3e: /* FMIN */
11001 case 0x5b: /* FMUL */
11002 case 0x5c: /* FCMGE */
11003 case 0x5f: /* FDIV */
11004 case 0x7a: /* FABD */
11005 case 0x7c: /* FCMGT */
11006 if (!fp_access_check(s)) {
11007 return;
11008 }
11009 handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
11010 return;
11011
11012 case 0x1d: /* FMLAL */
11013 case 0x3d: /* FMLSL */
11014 case 0x59: /* FMLAL2 */
11015 case 0x79: /* FMLSL2 */
11016 if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
11017 unallocated_encoding(s);
11018 return;
11019 }
11020 if (fp_access_check(s)) {
11021 int is_s = extract32(insn, 23, 1);
11022 int is_2 = extract32(insn, 29, 1);
11023 int data = (is_2 << 1) | is_s;
11024 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
11025 vec_full_reg_offset(s, rn),
11026 vec_full_reg_offset(s, rm), cpu_env,
11027 is_q ? 16 : 8, vec_full_reg_size(s),
11028 data, gen_helper_gvec_fmlal_a64);
11029 }
11030 return;
11031
11032 default:
11033 unallocated_encoding(s);
11034 return;
11035 }
11036 }
11037
11038 /* Integer op subgroup of C3.6.16. */
11039 static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
11040 {
11041 int is_q = extract32(insn, 30, 1);
11042 int u = extract32(insn, 29, 1);
11043 int size = extract32(insn, 22, 2);
11044 int opcode = extract32(insn, 11, 5);
11045 int rm = extract32(insn, 16, 5);
11046 int rn = extract32(insn, 5, 5);
11047 int rd = extract32(insn, 0, 5);
11048 int pass;
11049 TCGCond cond;
11050
11051 switch (opcode) {
11052 case 0x13: /* MUL, PMUL */
11053 if (u && size != 0) {
11054 unallocated_encoding(s);
11055 return;
11056 }
11057 /* fall through */
11058 case 0x0: /* SHADD, UHADD */
11059 case 0x2: /* SRHADD, URHADD */
11060 case 0x4: /* SHSUB, UHSUB */
11061 case 0xc: /* SMAX, UMAX */
11062 case 0xd: /* SMIN, UMIN */
11063 case 0xe: /* SABD, UABD */
11064 case 0xf: /* SABA, UABA */
11065 case 0x12: /* MLA, MLS */
11066 if (size == 3) {
11067 unallocated_encoding(s);
11068 return;
11069 }
11070 break;
11071 case 0x16: /* SQDMULH, SQRDMULH */
11072 if (size == 0 || size == 3) {
11073 unallocated_encoding(s);
11074 return;
11075 }
11076 break;
11077 default:
11078 if (size == 3 && !is_q) {
11079 unallocated_encoding(s);
11080 return;
11081 }
11082 break;
11083 }
11084
11085 if (!fp_access_check(s)) {
11086 return;
11087 }
11088
11089 switch (opcode) {
11090 case 0x01: /* SQADD, UQADD */
11091 if (u) {
11092 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
11093 } else {
11094 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
11095 }
11096 return;
11097 case 0x05: /* SQSUB, UQSUB */
11098 if (u) {
11099 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
11100 } else {
11101 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
11102 }
11103 return;
11104 case 0x08: /* SSHL, USHL */
11105 if (u) {
11106 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
11107 } else {
11108 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
11109 }
11110 return;
11111 case 0x0c: /* SMAX, UMAX */
11112 if (u) {
11113 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
11114 } else {
11115 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
11116 }
11117 return;
11118 case 0x0d: /* SMIN, UMIN */
11119 if (u) {
11120 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
11121 } else {
11122 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
11123 }
11124 return;
11125 case 0xe: /* SABD, UABD */
11126 if (u) {
11127 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size);
11128 } else {
11129 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size);
11130 }
11131 return;
11132 case 0xf: /* SABA, UABA */
11133 if (u) {
11134 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size);
11135 } else {
11136 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size);
11137 }
11138 return;
11139 case 0x10: /* ADD, SUB */
11140 if (u) {
11141 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
11142 } else {
11143 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
11144 }
11145 return;
11146 case 0x13: /* MUL, PMUL */
11147 if (!u) { /* MUL */
11148 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
11149 } else { /* PMUL */
11150 gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
11151 }
11152 return;
11153 case 0x12: /* MLA, MLS */
11154 if (u) {
11155 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size);
11156 } else {
11157 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size);
11158 }
11159 return;
11160 case 0x16: /* SQDMULH, SQRDMULH */
11161 {
11162 static gen_helper_gvec_3_ptr * const fns[2][2] = {
11163 { gen_helper_neon_sqdmulh_h, gen_helper_neon_sqrdmulh_h },
11164 { gen_helper_neon_sqdmulh_s, gen_helper_neon_sqrdmulh_s },
11165 };
11166 gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
11167 }
11168 return;
11169 case 0x11:
11170 if (!u) { /* CMTST */
11171 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
11172 return;
11173 }
11174 /* else CMEQ */
11175 cond = TCG_COND_EQ;
11176 goto do_gvec_cmp;
11177 case 0x06: /* CMGT, CMHI */
11178 cond = u ? TCG_COND_GTU : TCG_COND_GT;
11179 goto do_gvec_cmp;
11180 case 0x07: /* CMGE, CMHS */
11181 cond = u ? TCG_COND_GEU : TCG_COND_GE;
11182 do_gvec_cmp:
11183 tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
11184 vec_full_reg_offset(s, rn),
11185 vec_full_reg_offset(s, rm),
11186 is_q ? 16 : 8, vec_full_reg_size(s));
11187 return;
11188 }
11189
11190 if (size == 3) {
11191 assert(is_q);
11192 for (pass = 0; pass < 2; pass++) {
11193 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11194 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11195 TCGv_i64 tcg_res = tcg_temp_new_i64();
11196
11197 read_vec_element(s, tcg_op1, rn, pass, MO_64);
11198 read_vec_element(s, tcg_op2, rm, pass, MO_64);
11199
11200 handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
11201
11202 write_vec_element(s, tcg_res, rd, pass, MO_64);
11203 }
11204 } else {
11205 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
11206 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11207 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11208 TCGv_i32 tcg_res = tcg_temp_new_i32();
11209 NeonGenTwoOpFn *genfn = NULL;
11210 NeonGenTwoOpEnvFn *genenvfn = NULL;
11211
11212 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
11213 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
11214
11215 switch (opcode) {
11216 case 0x0: /* SHADD, UHADD */
11217 {
11218 static NeonGenTwoOpFn * const fns[3][2] = {
11219 { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
11220 { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
11221 { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
11222 };
11223 genfn = fns[size][u];
11224 break;
11225 }
11226 case 0x2: /* SRHADD, URHADD */
11227 {
11228 static NeonGenTwoOpFn * const fns[3][2] = {
11229 { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
11230 { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
11231 { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
11232 };
11233 genfn = fns[size][u];
11234 break;
11235 }
11236 case 0x4: /* SHSUB, UHSUB */
11237 {
11238 static NeonGenTwoOpFn * const fns[3][2] = {
11239 { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
11240 { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
11241 { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
11242 };
11243 genfn = fns[size][u];
11244 break;
11245 }
11246 case 0x9: /* SQSHL, UQSHL */
11247 {
11248 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11249 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
11250 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
11251 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
11252 };
11253 genenvfn = fns[size][u];
11254 break;
11255 }
11256 case 0xa: /* SRSHL, URSHL */
11257 {
11258 static NeonGenTwoOpFn * const fns[3][2] = {
11259 { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
11260 { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
11261 { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
11262 };
11263 genfn = fns[size][u];
11264 break;
11265 }
11266 case 0xb: /* SQRSHL, UQRSHL */
11267 {
11268 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11269 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
11270 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
11271 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
11272 };
11273 genenvfn = fns[size][u];
11274 break;
11275 }
11276 default:
11277 g_assert_not_reached();
11278 }
11279
11280 if (genenvfn) {
11281 genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
11282 } else {
11283 genfn(tcg_res, tcg_op1, tcg_op2);
11284 }
11285
11286 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
11287 }
11288 }
11289 clear_vec_high(s, is_q, rd);
11290 }
11291
11292 /* AdvSIMD three same
11293 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
11294 * +---+---+---+-----------+------+---+------+--------+---+------+------+
11295 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
11296 * +---+---+---+-----------+------+---+------+--------+---+------+------+
11297 */
11298 static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
11299 {
11300 int opcode = extract32(insn, 11, 5);
11301
11302 switch (opcode) {
11303 case 0x3: /* logic ops */
11304 disas_simd_3same_logic(s, insn);
11305 break;
11306 case 0x17: /* ADDP */
11307 case 0x14: /* SMAXP, UMAXP */
11308 case 0x15: /* SMINP, UMINP */
11309 {
11310 /* Pairwise operations */
11311 int is_q = extract32(insn, 30, 1);
11312 int u = extract32(insn, 29, 1);
11313 int size = extract32(insn, 22, 2);
11314 int rm = extract32(insn, 16, 5);
11315 int rn = extract32(insn, 5, 5);
11316 int rd = extract32(insn, 0, 5);
11317 if (opcode == 0x17) {
11318 if (u || (size == 3 && !is_q)) {
11319 unallocated_encoding(s);
11320 return;
11321 }
11322 } else {
11323 if (size == 3) {
11324 unallocated_encoding(s);
11325 return;
11326 }
11327 }
11328 handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
11329 break;
11330 }
11331 case 0x18 ... 0x31:
11332 /* floating point ops, sz[1] and U are part of opcode */
11333 disas_simd_3same_float(s, insn);
11334 break;
11335 default:
11336 disas_simd_3same_int(s, insn);
11337 break;
11338 }
11339 }
11340
11341 /*
11342 * Advanced SIMD three same (ARMv8.2 FP16 variants)
11343 *
11344 * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0
11345 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11346 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd |
11347 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11348 *
11349 * This covers the FP16 versions of FMULX, FCMEQ, FRECPS, FRSQRTS, FCMGE, FACGE, FABD, FCMGT, FACGT, FMAXNM, FMLA, FADD, FMAX, FMINNM, FMLS, FSUB, FMIN, FMUL and FDIV,
11350 * plus the pairwise FMAXNMP, FADDP, FMAXP, FMINNMP and FMINP.
11351 *
11352 */
11353 static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
11354 {
11355 int opcode = extract32(insn, 11, 3);
11356 int u = extract32(insn, 29, 1);
11357 int a = extract32(insn, 23, 1);
11358 int is_q = extract32(insn, 30, 1);
11359 int rm = extract32(insn, 16, 5);
11360 int rn = extract32(insn, 5, 5);
11361 int rd = extract32(insn, 0, 5);
11362 /*
11363 * For these floating point ops, the U, a and opcode bits
11364 * together indicate the operation.
11365 */
11366 int fpopcode = opcode | (a << 3) | (u << 4);
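/*
 * Example: FADDP (half precision) has U=1, a=0, opcode=0b010, giving
 * fpopcode 0x12, one of the pairwise cases below.
 */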
11367 int datasize = is_q ? 128 : 64;
11368 int elements = datasize / 16;
11369 bool pairwise;
11370 TCGv_ptr fpst;
11371 int pass;
11372
11373 switch (fpopcode) {
11374 case 0x0: /* FMAXNM */
11375 case 0x1: /* FMLA */
11376 case 0x2: /* FADD */
11377 case 0x3: /* FMULX */
11378 case 0x4: /* FCMEQ */
11379 case 0x6: /* FMAX */
11380 case 0x7: /* FRECPS */
11381 case 0x8: /* FMINNM */
11382 case 0x9: /* FMLS */
11383 case 0xa: /* FSUB */
11384 case 0xe: /* FMIN */
11385 case 0xf: /* FRSQRTS */
11386 case 0x13: /* FMUL */
11387 case 0x14: /* FCMGE */
11388 case 0x15: /* FACGE */
11389 case 0x17: /* FDIV */
11390 case 0x1a: /* FABD */
11391 case 0x1c: /* FCMGT */
11392 case 0x1d: /* FACGT */
11393 pairwise = false;
11394 break;
11395 case 0x10: /* FMAXNMP */
11396 case 0x12: /* FADDP */
11397 case 0x16: /* FMAXP */
11398 case 0x18: /* FMINNMP */
11399 case 0x1e: /* FMINP */
11400 pairwise = true;
11401 break;
11402 default:
11403 unallocated_encoding(s);
11404 return;
11405 }
11406
11407 if (!dc_isar_feature(aa64_fp16, s)) {
11408 unallocated_encoding(s);
11409 return;
11410 }
11411
11412 if (!fp_access_check(s)) {
11413 return;
11414 }
11415
11416 fpst = fpstatus_ptr(FPST_FPCR_F16);
11417
11418 if (pairwise) {
11419 int maxpass = is_q ? 8 : 4;
11420 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11421 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11422 TCGv_i32 tcg_res[8];
11423
11424 for (pass = 0; pass < maxpass; pass++) {
11425 int passreg = pass < (maxpass / 2) ? rn : rm;
11426 int passelt = (pass << 1) & (maxpass - 1);
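/* With Q=1, passes 0-3 read adjacent element pairs from rn and
 * passes 4-7 read the corresponding pairs from rm.
 */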
11427
11428 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
11429 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
11430 tcg_res[pass] = tcg_temp_new_i32();
11431
11432 switch (fpopcode) {
11433 case 0x10: /* FMAXNMP */
11434 gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
11435 fpst);
11436 break;
11437 case 0x12: /* FADDP */
11438 gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11439 break;
11440 case 0x16: /* FMAXP */
11441 gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11442 break;
11443 case 0x18: /* FMINNMP */
11444 gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
11445 fpst);
11446 break;
11447 case 0x1e: /* FMINP */
11448 gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11449 break;
11450 default:
11451 g_assert_not_reached();
11452 }
11453 }
11454
11455 for (pass = 0; pass < maxpass; pass++) {
11456 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
11457 }
11458 } else {
11459 for (pass = 0; pass < elements; pass++) {
11460 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11461 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11462 TCGv_i32 tcg_res = tcg_temp_new_i32();
11463
11464 read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
11465 read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);
11466
11467 switch (fpopcode) {
11468 case 0x0: /* FMAXNM */
11469 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
11470 break;
11471 case 0x1: /* FMLA */
11472 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11473 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11474 fpst);
11475 break;
11476 case 0x2: /* FADD */
11477 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
11478 break;
11479 case 0x3: /* FMULX */
11480 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
11481 break;
11482 case 0x4: /* FCMEQ */
11483 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11484 break;
11485 case 0x6: /* FMAX */
11486 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
11487 break;
11488 case 0x7: /* FRECPS */
11489 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11490 break;
11491 case 0x8: /* FMINNM */
11492 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
11493 break;
11494 case 0x9: /* FMLS */
11495 /* As usual for ARM, separate negation for fused multiply-add */
11496 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
11497 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11498 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11499 fpst);
11500 break;
11501 case 0xa: /* FSUB */
11502 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11503 break;
11504 case 0xe: /* FMIN */
11505 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
11506 break;
11507 case 0xf: /* FRSQRTS */
11508 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11509 break;
11510 case 0x13: /* FMUL */
11511 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
11512 break;
11513 case 0x14: /* FCMGE */
11514 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11515 break;
11516 case 0x15: /* FACGE */
11517 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11518 break;
11519 case 0x17: /* FDIV */
11520 gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
11521 break;
11522 case 0x1a: /* FABD */
11523 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11524 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
11525 break;
11526 case 0x1c: /* FCMGT */
11527 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11528 break;
11529 case 0x1d: /* FACGT */
11530 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11531 break;
11532 default:
11533 g_assert_not_reached();
11534 }
11535
11536 write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11537 }
11538 }
11539
11540 clear_vec_high(s, is_q, rd);
11541 }
11542
11543 /* AdvSIMD three same extra
11544 * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
11545 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11546 * | 0 | Q | U | 0 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd |
11547 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11548 */
11549 static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
11550 {
11551 int rd = extract32(insn, 0, 5);
11552 int rn = extract32(insn, 5, 5);
11553 int opcode = extract32(insn, 11, 4);
11554 int rm = extract32(insn, 16, 5);
11555 int size = extract32(insn, 22, 2);
11556 bool u = extract32(insn, 29, 1);
11557 bool is_q = extract32(insn, 30, 1);
11558 bool feature;
11559 int rot;
11560
11561 switch (u * 16 + opcode) {
11562 case 0x10: /* SQRDMLAH (vector) */
11563 case 0x11: /* SQRDMLSH (vector) */
11564 if (size != 1 && size != 2) {
11565 unallocated_encoding(s);
11566 return;
11567 }
11568 feature = dc_isar_feature(aa64_rdm, s);
11569 break;
11570 case 0x02: /* SDOT (vector) */
11571 case 0x12: /* UDOT (vector) */
11572 if (size != MO_32) {
11573 unallocated_encoding(s);
11574 return;
11575 }
11576 feature = dc_isar_feature(aa64_dp, s);
11577 break;
11578 case 0x03: /* USDOT */
11579 if (size != MO_32) {
11580 unallocated_encoding(s);
11581 return;
11582 }
11583 feature = dc_isar_feature(aa64_i8mm, s);
11584 break;
11585 case 0x04: /* SMMLA */
11586 case 0x14: /* UMMLA */
11587 case 0x05: /* USMMLA */
11588 if (!is_q || size != MO_32) {
11589 unallocated_encoding(s);
11590 return;
11591 }
11592 feature = dc_isar_feature(aa64_i8mm, s);
11593 break;
11594 case 0x18: /* FCMLA, #0 */
11595 case 0x19: /* FCMLA, #90 */
11596 case 0x1a: /* FCMLA, #180 */
11597 case 0x1b: /* FCMLA, #270 */
11598 case 0x1c: /* FCADD, #90 */
11599 case 0x1e: /* FCADD, #270 */
11600 if (size == 0
11601 || (size == 1 && !dc_isar_feature(aa64_fp16, s))
11602 || (size == 3 && !is_q)) {
11603 unallocated_encoding(s);
11604 return;
11605 }
11606 feature = dc_isar_feature(aa64_fcma, s);
11607 break;
11608 case 0x1d: /* BFMMLA */
11609 if (size != MO_16 || !is_q) {
11610 unallocated_encoding(s);
11611 return;
11612 }
11613 feature = dc_isar_feature(aa64_bf16, s);
11614 break;
11615 case 0x1f:
11616 switch (size) {
11617 case 1: /* BFDOT */
11618 case 3: /* BFMLAL{B,T} */
11619 feature = dc_isar_feature(aa64_bf16, s);
11620 break;
11621 default:
11622 unallocated_encoding(s);
11623 return;
11624 }
11625 break;
11626 default:
11627 unallocated_encoding(s);
11628 return;
11629 }
11630 if (!feature) {
11631 unallocated_encoding(s);
11632 return;
11633 }
11634 if (!fp_access_check(s)) {
11635 return;
11636 }
11637
11638 switch (opcode) {
11639 case 0x0: /* SQRDMLAH (vector) */
11640 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
11641 return;
11642
11643 case 0x1: /* SQRDMLSH (vector) */
11644 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
11645 return;
11646
11647 case 0x2: /* SDOT / UDOT */
11648 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
11649 u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
11650 return;
11651
11652 case 0x3: /* USDOT */
11653 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
11654 return;
11655
11656 case 0x04: /* SMMLA, UMMLA */
11657 gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
11658 u ? gen_helper_gvec_ummla_b
11659 : gen_helper_gvec_smmla_b);
11660 return;
11661 case 0x05: /* USMMLA */
11662 gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
11663 return;
11664
11665 case 0x8: /* FCMLA, #0 */
11666 case 0x9: /* FCMLA, #90 */
11667 case 0xa: /* FCMLA, #180 */
11668 case 0xb: /* FCMLA, #270 */
11669 rot = extract32(opcode, 0, 2);
11670 switch (size) {
11671 case 1:
11672 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
11673 gen_helper_gvec_fcmlah);
11674 break;
11675 case 2:
11676 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
11677 gen_helper_gvec_fcmlas);
11678 break;
11679 case 3:
11680 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
11681 gen_helper_gvec_fcmlad);
11682 break;
11683 default:
11684 g_assert_not_reached();
11685 }
11686 return;
11687
11688 case 0xc: /* FCADD, #90 */
11689 case 0xe: /* FCADD, #270 */
11690 rot = extract32(opcode, 1, 1);
11691 switch (size) {
11692 case 1:
11693 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11694 gen_helper_gvec_fcaddh);
11695 break;
11696 case 2:
11697 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11698 gen_helper_gvec_fcadds);
11699 break;
11700 case 3:
11701 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11702 gen_helper_gvec_fcaddd);
11703 break;
11704 default:
11705 g_assert_not_reached();
11706 }
11707 return;
11708
11709 case 0xd: /* BFMMLA */
11710 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
11711 return;
11712 case 0xf:
11713 switch (size) {
11714 case 1: /* BFDOT */
11715 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot);
11716 break;
11717 case 3: /* BFMLAL{B,T} */
11718 gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
11719 gen_helper_gvec_bfmlal);
11720 break;
11721 default:
11722 g_assert_not_reached();
11723 }
11724 return;
11725
11726 default:
11727 g_assert_not_reached();
11728 }
11729 }
11730
11731 static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
11732 int size, int rn, int rd)
11733 {
11734 /* Handle 2-reg-misc ops which are widening, so each size element
11735 * in the source becomes a 2*size element in the destination.
11736 * The only instruction like this is FCVTL.
11737 */
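/* For example FCVTL Vd.2D, Vn.2S takes the size == 3 path below; the
 * FCVTL2 forms (is_q) read their source elements from the upper half
 * of Vn.
 */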
11738 int pass;
11739
11740 if (size == 3) {
11741 /* 32 -> 64 bit fp conversion */
11742 TCGv_i64 tcg_res[2];
11743 int srcelt = is_q ? 2 : 0;
11744
11745 for (pass = 0; pass < 2; pass++) {
11746 TCGv_i32 tcg_op = tcg_temp_new_i32();
11747 tcg_res[pass] = tcg_temp_new_i64();
11748
11749 read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
11750 gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
11751 }
11752 for (pass = 0; pass < 2; pass++) {
11753 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11754 }
11755 } else {
11756 /* 16 -> 32 bit fp conversion */
11757 int srcelt = is_q ? 4 : 0;
11758 TCGv_i32 tcg_res[4];
11759 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
11760 TCGv_i32 ahp = get_ahp_flag();
11761
11762 for (pass = 0; pass < 4; pass++) {
11763 tcg_res[pass] = tcg_temp_new_i32();
11764
11765 read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
11766 gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
11767 fpst, ahp);
11768 }
11769 for (pass = 0; pass < 4; pass++) {
11770 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11771 }
11772 }
11773 }
11774
11775 static void handle_rev(DisasContext *s, int opcode, bool u,
11776 bool is_q, int size, int rn, int rd)
11777 {
11778 int op = (opcode << 1) | u;
11779 int opsz = op + size;
11780 int grp_size = 3 - opsz;
11781 int dsize = is_q ? 128 : 64;
11782 int i;
11783
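/* grp_size is the log2 of the group size within which elements are
 * reversed: e.g. REV64 of byte elements gives grp_size == 3 (reverse
 * the bytes within each 64-bit group), REV32 of bytes gives 2, and
 * REV16 gives 1.
 */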
11784 if (opsz >= 3) {
11785 unallocated_encoding(s);
11786 return;
11787 }
11788
11789 if (!fp_access_check(s)) {
11790 return;
11791 }
11792
11793 if (size == 0) {
11794 /* Special case bytes, use bswap op on each group of elements */
11795 int groups = dsize / (8 << grp_size);
11796
11797 for (i = 0; i < groups; i++) {
11798 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
11799
11800 read_vec_element(s, tcg_tmp, rn, i, grp_size);
11801 switch (grp_size) {
11802 case MO_16:
11803 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
11804 break;
11805 case MO_32:
11806 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
11807 break;
11808 case MO_64:
11809 tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
11810 break;
11811 default:
11812 g_assert_not_reached();
11813 }
11814 write_vec_element(s, tcg_tmp, rd, i, grp_size);
11815 }
11816 clear_vec_high(s, is_q, rd);
11817 } else {
11818 int revmask = (1 << grp_size) - 1;
11819 int esize = 8 << size;
11820 int elements = dsize / esize;
11821 TCGv_i64 tcg_rn = tcg_temp_new_i64();
11822 TCGv_i64 tcg_rd[2];
11823
11824 for (i = 0; i < 2; i++) {
11825 tcg_rd[i] = tcg_temp_new_i64();
11826 tcg_gen_movi_i64(tcg_rd[i], 0);
11827 }
11828
11829 for (i = 0; i < elements; i++) {
11830 int e_rev = (i & 0xf) ^ revmask;
11831 int w = (e_rev * esize) / 64;
11832 int o = (e_rev * esize) % 64;
11833
11834 read_vec_element(s, tcg_rn, rn, i, size);
11835 tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize);
11836 }
11837
11838 for (i = 0; i < 2; i++) {
11839 write_vec_element(s, tcg_rd[i], rd, i, MO_64);
11840 }
11841 clear_vec_high(s, true, rd);
11842 }
11843 }
11844
11845 static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
11846 bool is_q, int size, int rn, int rd)
11847 {
11848 /* Implement the pairwise operations from 2-misc:
11849 * SADDLP, UADDLP, SADALP, UADALP.
11850 * These all add pairs of elements in the input to produce a
11851 * double-width result element in the output (possibly accumulating).
11852 */
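/* For example UADDLP Vd.4S, Vn.8H zero-extends and sums each pair of
 * 16-bit source elements into a 32-bit result; the SADALP and UADALP
 * forms additionally accumulate into the existing destination element.
 */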
11853 bool accum = (opcode == 0x6);
11854 int maxpass = is_q ? 2 : 1;
11855 int pass;
11856 TCGv_i64 tcg_res[2];
11857
11858 if (size == 2) {
11859 /* 32 + 32 -> 64 op */
11860 MemOp memop = size + (u ? 0 : MO_SIGN);
11861
11862 for (pass = 0; pass < maxpass; pass++) {
11863 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11864 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11865
11866 tcg_res[pass] = tcg_temp_new_i64();
11867
11868 read_vec_element(s, tcg_op1, rn, pass * 2, memop);
11869 read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
11870 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
11871 if (accum) {
11872 read_vec_element(s, tcg_op1, rd, pass, MO_64);
11873 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
11874 }
11875 }
11876 } else {
11877 for (pass = 0; pass < maxpass; pass++) {
11878 TCGv_i64 tcg_op = tcg_temp_new_i64();
11879 NeonGenOne64OpFn *genfn;
11880 static NeonGenOne64OpFn * const fns[2][2] = {
11881 { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
11882 { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
11883 };
11884
11885 genfn = fns[size][u];
11886
11887 tcg_res[pass] = tcg_temp_new_i64();
11888
11889 read_vec_element(s, tcg_op, rn, pass, MO_64);
11890 genfn(tcg_res[pass], tcg_op);
11891
11892 if (accum) {
11893 read_vec_element(s, tcg_op, rd, pass, MO_64);
11894 if (size == 0) {
11895 gen_helper_neon_addl_u16(tcg_res[pass],
11896 tcg_res[pass], tcg_op);
11897 } else {
11898 gen_helper_neon_addl_u32(tcg_res[pass],
11899 tcg_res[pass], tcg_op);
11900 }
11901 }
11902 }
11903 }
11904 if (!is_q) {
11905 tcg_res[1] = tcg_constant_i64(0);
11906 }
11907 for (pass = 0; pass < 2; pass++) {
11908 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11909 }
11910 }
11911
11912 static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
11913 {
11914 /* Implement SHLL and SHLL2 */
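/* Both widen each source element and then shift it left by the source
 * element size, e.g. SHLL Vd.8H, Vn.8B, #8; the SHLL2 form (is_q) reads
 * the high half of the source register.
 */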
11915 int pass;
11916 int part = is_q ? 2 : 0;
11917 TCGv_i64 tcg_res[2];
11918
11919 for (pass = 0; pass < 2; pass++) {
11920 static NeonGenWidenFn * const widenfns[3] = {
11921 gen_helper_neon_widen_u8,
11922 gen_helper_neon_widen_u16,
11923 tcg_gen_extu_i32_i64,
11924 };
11925 NeonGenWidenFn *widenfn = widenfns[size];
11926 TCGv_i32 tcg_op = tcg_temp_new_i32();
11927
11928 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
11929 tcg_res[pass] = tcg_temp_new_i64();
11930 widenfn(tcg_res[pass], tcg_op);
11931 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
11932 }
11933
11934 for (pass = 0; pass < 2; pass++) {
11935 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11936 }
11937 }
11938
11939 /* AdvSIMD two reg misc
11940 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
11941 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11942 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
11943 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11944 */
11945 static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
11946 {
11947 int size = extract32(insn, 22, 2);
11948 int opcode = extract32(insn, 12, 5);
11949 bool u = extract32(insn, 29, 1);
11950 bool is_q = extract32(insn, 30, 1);
11951 int rn = extract32(insn, 5, 5);
11952 int rd = extract32(insn, 0, 5);
11953 bool need_fpstatus = false;
11954 int rmode = -1;
11955 TCGv_i32 tcg_rmode;
11956 TCGv_ptr tcg_fpstatus;
11957
11958 switch (opcode) {
11959 case 0x0: /* REV64, REV32 */
11960 case 0x1: /* REV16 */
11961 handle_rev(s, opcode, u, is_q, size, rn, rd);
11962 return;
11963 case 0x5: /* CNT, NOT, RBIT */
11964 if (u && size == 0) {
11965 /* NOT */
11966 break;
11967 } else if (u && size == 1) {
11968 /* RBIT */
11969 break;
11970 } else if (!u && size == 0) {
11971 /* CNT */
11972 break;
11973 }
11974 unallocated_encoding(s);
11975 return;
11976 case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
11977 case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
11978 if (size == 3) {
11979 unallocated_encoding(s);
11980 return;
11981 }
11982 if (!fp_access_check(s)) {
11983 return;
11984 }
11985
11986 handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
11987 return;
11988 case 0x4: /* CLS, CLZ */
11989 if (size == 3) {
11990 unallocated_encoding(s);
11991 return;
11992 }
11993 break;
11994 case 0x2: /* SADDLP, UADDLP */
11995 case 0x6: /* SADALP, UADALP */
11996 if (size == 3) {
11997 unallocated_encoding(s);
11998 return;
11999 }
12000 if (!fp_access_check(s)) {
12001 return;
12002 }
12003 handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
12004 return;
12005 case 0x13: /* SHLL, SHLL2 */
12006 if (u == 0 || size == 3) {
12007 unallocated_encoding(s);
12008 return;
12009 }
12010 if (!fp_access_check(s)) {
12011 return;
12012 }
12013 handle_shll(s, is_q, size, rn, rd);
12014 return;
12015 case 0xa: /* CMLT */
12016 if (u == 1) {
12017 unallocated_encoding(s);
12018 return;
12019 }
12020 /* fall through */
12021 case 0x8: /* CMGT, CMGE */
12022 case 0x9: /* CMEQ, CMLE */
12023 case 0xb: /* ABS, NEG */
12024 if (size == 3 && !is_q) {
12025 unallocated_encoding(s);
12026 return;
12027 }
12028 break;
12029 case 0x3: /* SUQADD, USQADD */
12030 if (size == 3 && !is_q) {
12031 unallocated_encoding(s);
12032 return;
12033 }
12034 if (!fp_access_check(s)) {
12035 return;
12036 }
12037 handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
12038 return;
12039 case 0x7: /* SQABS, SQNEG */
12040 if (size == 3 && !is_q) {
12041 unallocated_encoding(s);
12042 return;
12043 }
12044 break;
12045 case 0xc ... 0xf:
12046 case 0x16 ... 0x1f:
12047 {
12048 /* Floating point: U, size[1] and opcode indicate operation;
12049 * size[0] indicates single or double precision.
12050 */
12051 int is_double = extract32(size, 0, 1);
12052 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
12053 size = is_double ? 3 : 2;
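/* e.g. FABS (U=0, size<1>=1, opcode=0xf) folds to 0x2f and FNEG (U=1)
 * folds to 0x6f in the switch below.
 */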
12054 switch (opcode) {
12055 case 0x2f: /* FABS */
12056 case 0x6f: /* FNEG */
12057 if (size == 3 && !is_q) {
12058 unallocated_encoding(s);
12059 return;
12060 }
12061 break;
12062 case 0x1d: /* SCVTF */
12063 case 0x5d: /* UCVTF */
12064 {
12065 bool is_signed = (opcode == 0x1d);
12066 int elements = is_double ? 2 : is_q ? 4 : 2;
12067 if (is_double && !is_q) {
12068 unallocated_encoding(s);
12069 return;
12070 }
12071 if (!fp_access_check(s)) {
12072 return;
12073 }
12074 handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
12075 return;
12076 }
12077 case 0x2c: /* FCMGT (zero) */
12078 case 0x2d: /* FCMEQ (zero) */
12079 case 0x2e: /* FCMLT (zero) */
12080 case 0x6c: /* FCMGE (zero) */
12081 case 0x6d: /* FCMLE (zero) */
12082 if (size == 3 && !is_q) {
12083 unallocated_encoding(s);
12084 return;
12085 }
12086 handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
12087 return;
12088 case 0x7f: /* FSQRT */
12089 if (size == 3 && !is_q) {
12090 unallocated_encoding(s);
12091 return;
12092 }
12093 break;
12094 case 0x1a: /* FCVTNS */
12095 case 0x1b: /* FCVTMS */
12096 case 0x3a: /* FCVTPS */
12097 case 0x3b: /* FCVTZS */
12098 case 0x5a: /* FCVTNU */
12099 case 0x5b: /* FCVTMU */
12100 case 0x7a: /* FCVTPU */
12101 case 0x7b: /* FCVTZU */
12102 need_fpstatus = true;
12103 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
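/* This yields FPROUNDING_TIEEVEN, NEGINF, POSINF and ZERO for the
 * N, M, P and Z variants respectively.
 */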
12104 if (size == 3 && !is_q) {
12105 unallocated_encoding(s);
12106 return;
12107 }
12108 break;
12109 case 0x5c: /* FCVTAU */
12110 case 0x1c: /* FCVTAS */
12111 need_fpstatus = true;
12112 rmode = FPROUNDING_TIEAWAY;
12113 if (size == 3 && !is_q) {
12114 unallocated_encoding(s);
12115 return;
12116 }
12117 break;
12118 case 0x3c: /* URECPE */
12119 if (size == 3) {
12120 unallocated_encoding(s);
12121 return;
12122 }
12123 /* fall through */
12124 case 0x3d: /* FRECPE */
12125 case 0x7d: /* FRSQRTE */
12126 if (size == 3 && !is_q) {
12127 unallocated_encoding(s);
12128 return;
12129 }
12130 if (!fp_access_check(s)) {
12131 return;
12132 }
12133 handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
12134 return;
12135 case 0x56: /* FCVTXN, FCVTXN2 */
12136 if (size == 2) {
12137 unallocated_encoding(s);
12138 return;
12139 }
12140 /* fall through */
12141 case 0x16: /* FCVTN, FCVTN2 */
12142 /* handle_2misc_narrow does a 2*size -> size operation, but these
12143 * instructions encode the source size rather than dest size.
12144 */
12145 if (!fp_access_check(s)) {
12146 return;
12147 }
12148 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
12149 return;
12150 case 0x36: /* BFCVTN, BFCVTN2 */
12151 if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
12152 unallocated_encoding(s);
12153 return;
12154 }
12155 if (!fp_access_check(s)) {
12156 return;
12157 }
12158 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
12159 return;
12160 case 0x17: /* FCVTL, FCVTL2 */
12161 if (!fp_access_check(s)) {
12162 return;
12163 }
12164 handle_2misc_widening(s, opcode, is_q, size, rn, rd);
12165 return;
12166 case 0x18: /* FRINTN */
12167 case 0x19: /* FRINTM */
12168 case 0x38: /* FRINTP */
12169 case 0x39: /* FRINTZ */
12170 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
12171 /* fall through */
12172 case 0x59: /* FRINTX */
12173 case 0x79: /* FRINTI */
12174 need_fpstatus = true;
12175 if (size == 3 && !is_q) {
12176 unallocated_encoding(s);
12177 return;
12178 }
12179 break;
12180 case 0x58: /* FRINTA */
12181 rmode = FPROUNDING_TIEAWAY;
12182 need_fpstatus = true;
12183 if (size == 3 && !is_q) {
12184 unallocated_encoding(s);
12185 return;
12186 }
12187 break;
12188 case 0x7c: /* URSQRTE */
12189 if (size == 3) {
12190 unallocated_encoding(s);
12191 return;
12192 }
12193 break;
12194 case 0x1e: /* FRINT32Z */
12195 case 0x1f: /* FRINT64Z */
12196 rmode = FPROUNDING_ZERO;
12197 /* fall through */
12198 case 0x5e: /* FRINT32X */
12199 case 0x5f: /* FRINT64X */
12200 need_fpstatus = true;
12201 if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
12202 unallocated_encoding(s);
12203 return;
12204 }
12205 break;
12206 default:
12207 unallocated_encoding(s);
12208 return;
12209 }
12210 break;
12211 }
12212 default:
12213 unallocated_encoding(s);
12214 return;
12215 }
12216
12217 if (!fp_access_check(s)) {
12218 return;
12219 }
12220
12221 if (need_fpstatus || rmode >= 0) {
12222 tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
12223 } else {
12224 tcg_fpstatus = NULL;
12225 }
12226 if (rmode >= 0) {
12227 tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
12228 } else {
12229 tcg_rmode = NULL;
12230 }
12231
12232 switch (opcode) {
12233 case 0x5:
12234 if (u && size == 0) { /* NOT */
12235 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
12236 return;
12237 }
12238 break;
12239 case 0x8: /* CMGT, CMGE */
12240 if (u) {
12241 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
12242 } else {
12243 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
12244 }
12245 return;
12246 case 0x9: /* CMEQ, CMLE */
12247 if (u) {
12248 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
12249 } else {
12250 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
12251 }
12252 return;
12253 case 0xa: /* CMLT */
12254 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
12255 return;
12256 case 0xb:
12257 if (u) { /* ABS, NEG */
12258 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
12259 } else {
12260 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
12261 }
12262 return;
12263 }
12264
12265 if (size == 3) {
12266 /* All 64-bit element operations can be shared with scalar 2misc */
12267 int pass;
12268
12269 /* Coverity claims (size == 3 && !is_q) has been eliminated
12270 * from all paths leading to here.
12271 */
12272 tcg_debug_assert(is_q);
12273 for (pass = 0; pass < 2; pass++) {
12274 TCGv_i64 tcg_op = tcg_temp_new_i64();
12275 TCGv_i64 tcg_res = tcg_temp_new_i64();
12276
12277 read_vec_element(s, tcg_op, rn, pass, MO_64);
12278
12279 handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
12280 tcg_rmode, tcg_fpstatus);
12281
12282 write_vec_element(s, tcg_res, rd, pass, MO_64);
12283 }
12284 } else {
12285 int pass;
12286
12287 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
12288 TCGv_i32 tcg_op = tcg_temp_new_i32();
12289 TCGv_i32 tcg_res = tcg_temp_new_i32();
12290
12291 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
12292
12293 if (size == 2) {
12294 /* Special cases for 32 bit elements */
12295 switch (opcode) {
12296 case 0x4: /* CLS */
12297 if (u) {
12298 tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
12299 } else {
12300 tcg_gen_clrsb_i32(tcg_res, tcg_op);
12301 }
12302 break;
12303 case 0x7: /* SQABS, SQNEG */
12304 if (u) {
12305 gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
12306 } else {
12307 gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
12308 }
12309 break;
12310 case 0x2f: /* FABS */
12311 gen_helper_vfp_abss(tcg_res, tcg_op);
12312 break;
12313 case 0x6f: /* FNEG */
12314 gen_helper_vfp_negs(tcg_res, tcg_op);
12315 break;
12316 case 0x7f: /* FSQRT */
12317 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
12318 break;
12319 case 0x1a: /* FCVTNS */
12320 case 0x1b: /* FCVTMS */
12321 case 0x1c: /* FCVTAS */
12322 case 0x3a: /* FCVTPS */
12323 case 0x3b: /* FCVTZS */
12324 gen_helper_vfp_tosls(tcg_res, tcg_op,
12325 tcg_constant_i32(0), tcg_fpstatus);
12326 break;
12327 case 0x5a: /* FCVTNU */
12328 case 0x5b: /* FCVTMU */
12329 case 0x5c: /* FCVTAU */
12330 case 0x7a: /* FCVTPU */
12331 case 0x7b: /* FCVTZU */
12332 gen_helper_vfp_touls(tcg_res, tcg_op,
12333 tcg_constant_i32(0), tcg_fpstatus);
12334 break;
12335 case 0x18: /* FRINTN */
12336 case 0x19: /* FRINTM */
12337 case 0x38: /* FRINTP */
12338 case 0x39: /* FRINTZ */
12339 case 0x58: /* FRINTA */
12340 case 0x79: /* FRINTI */
12341 gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
12342 break;
12343 case 0x59: /* FRINTX */
12344 gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
12345 break;
12346 case 0x7c: /* URSQRTE */
12347 gen_helper_rsqrte_u32(tcg_res, tcg_op);
12348 break;
12349 case 0x1e: /* FRINT32Z */
12350 case 0x5e: /* FRINT32X */
12351 gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
12352 break;
12353 case 0x1f: /* FRINT64Z */
12354 case 0x5f: /* FRINT64X */
12355 gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
12356 break;
12357 default:
12358 g_assert_not_reached();
12359 }
12360 } else {
12361 /* Use helpers for 8 and 16 bit elements */
12362 switch (opcode) {
12363 case 0x5: /* CNT, RBIT */
12364 /* For these two insns size is part of the opcode specifier
12365 * (handled earlier); they always operate on byte elements.
12366 */
12367 if (u) {
12368 gen_helper_neon_rbit_u8(tcg_res, tcg_op);
12369 } else {
12370 gen_helper_neon_cnt_u8(tcg_res, tcg_op);
12371 }
12372 break;
12373 case 0x7: /* SQABS, SQNEG */
12374 {
12375 NeonGenOneOpEnvFn *genfn;
12376 static NeonGenOneOpEnvFn * const fns[2][2] = {
12377 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
12378 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
12379 };
12380 genfn = fns[size][u];
12381 genfn(tcg_res, cpu_env, tcg_op);
12382 break;
12383 }
12384 case 0x4: /* CLS, CLZ */
12385 if (u) {
12386 if (size == 0) {
12387 gen_helper_neon_clz_u8(tcg_res, tcg_op);
12388 } else {
12389 gen_helper_neon_clz_u16(tcg_res, tcg_op);
12390 }
12391 } else {
12392 if (size == 0) {
12393 gen_helper_neon_cls_s8(tcg_res, tcg_op);
12394 } else {
12395 gen_helper_neon_cls_s16(tcg_res, tcg_op);
12396 }
12397 }
12398 break;
12399 default:
12400 g_assert_not_reached();
12401 }
12402 }
12403
12404 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
12405 }
12406 }
12407 clear_vec_high(s, is_q, rd);
12408
12409 if (tcg_rmode) {
12410 gen_restore_rmode(tcg_rmode, tcg_fpstatus);
12411 }
12412 }
12413
12414 /* AdvSIMD [scalar] two register miscellaneous (FP16)
12415 *
12416 * 31 30 29 28 27 24 23 22 21 17 16 12 11 10 9 5 4 0
12417 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12418 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 | Rn | Rd |
12419 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12420 * mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
12421 * val: 0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
12422 *
12423 * This actually covers two groups where scalar access is governed by
12424 * bit 28. A bunch of the instructions (float to integral) only exist
12425 * in the vector form and are unallocated for the scalar decode. Also
12426 * in the scalar decode Q is always 1.
12427 */
12428 static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
12429 {
12430 int fpop, opcode, a, u;
12431 int rn, rd;
12432 bool is_q;
12433 bool is_scalar;
12434 bool only_in_vector = false;
12435
12436 int pass;
12437 TCGv_i32 tcg_rmode = NULL;
12438 TCGv_ptr tcg_fpstatus = NULL;
12439 bool need_fpst = true;
12440 int rmode = -1;
12441
12442 if (!dc_isar_feature(aa64_fp16, s)) {
12443 unallocated_encoding(s);
12444 return;
12445 }
12446
12447 rd = extract32(insn, 0, 5);
12448 rn = extract32(insn, 5, 5);
12449
12450 a = extract32(insn, 23, 1);
12451 u = extract32(insn, 29, 1);
12452 is_scalar = extract32(insn, 28, 1);
12453 is_q = extract32(insn, 30, 1);
12454
12455 opcode = extract32(insn, 12, 5);
12456 fpop = deposit32(opcode, 5, 1, a);
12457 fpop = deposit32(fpop, 6, 1, u);
12458
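/* fpop is the same folded encoding the non-FP16 2-misc decoder uses:
 * bit 5 is the "a" field and bit 6 is U, so e.g. FABS is again 0x2f
 * and FNEG 0x6f.
 */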
12459 switch (fpop) {
12460 case 0x1d: /* SCVTF */
12461 case 0x5d: /* UCVTF */
12462 {
12463 int elements;
12464
12465 if (is_scalar) {
12466 elements = 1;
12467 } else {
12468 elements = (is_q ? 8 : 4);
12469 }
12470
12471 if (!fp_access_check(s)) {
12472 return;
12473 }
12474 handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
12475 return;
12476 }
12477 break;
12478 case 0x2c: /* FCMGT (zero) */
12479 case 0x2d: /* FCMEQ (zero) */
12480 case 0x2e: /* FCMLT (zero) */
12481 case 0x6c: /* FCMGE (zero) */
12482 case 0x6d: /* FCMLE (zero) */
12483 handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
12484 return;
12485 case 0x3d: /* FRECPE */
12486 case 0x3f: /* FRECPX */
12487 break;
12488 case 0x18: /* FRINTN */
12489 only_in_vector = true;
12490 rmode = FPROUNDING_TIEEVEN;
12491 break;
12492 case 0x19: /* FRINTM */
12493 only_in_vector = true;
12494 rmode = FPROUNDING_NEGINF;
12495 break;
12496 case 0x38: /* FRINTP */
12497 only_in_vector = true;
12498 rmode = FPROUNDING_POSINF;
12499 break;
12500 case 0x39: /* FRINTZ */
12501 only_in_vector = true;
12502 rmode = FPROUNDING_ZERO;
12503 break;
12504 case 0x58: /* FRINTA */
12505 only_in_vector = true;
12506 rmode = FPROUNDING_TIEAWAY;
12507 break;
12508 case 0x59: /* FRINTX */
12509 case 0x79: /* FRINTI */
12510 only_in_vector = true;
12511 /* current rounding mode */
12512 break;
12513 case 0x1a: /* FCVTNS */
12514 rmode = FPROUNDING_TIEEVEN;
12515 break;
12516 case 0x1b: /* FCVTMS */
12517 rmode = FPROUNDING_NEGINF;
12518 break;
12519 case 0x1c: /* FCVTAS */
12520 rmode = FPROUNDING_TIEAWAY;
12521 break;
12522 case 0x3a: /* FCVTPS */
12523 rmode = FPROUNDING_POSINF;
12524 break;
12525 case 0x3b: /* FCVTZS */
12526 rmode = FPROUNDING_ZERO;
12527 break;
12528 case 0x5a: /* FCVTNU */
12529 rmode = FPROUNDING_TIEEVEN;
12530 break;
12531 case 0x5b: /* FCVTMU */
12532 rmode = FPROUNDING_NEGINF;
12533 break;
12534 case 0x5c: /* FCVTAU */
12535 rmode = FPROUNDING_TIEAWAY;
12536 break;
12537 case 0x7a: /* FCVTPU */
12538 rmode = FPROUNDING_POSINF;
12539 break;
12540 case 0x7b: /* FCVTZU */
12541 rmode = FPROUNDING_ZERO;
12542 break;
12543 case 0x2f: /* FABS */
12544 case 0x6f: /* FNEG */
12545 need_fpst = false;
12546 break;
12547 case 0x7d: /* FRSQRTE */
12548 case 0x7f: /* FSQRT (vector) */
12549 break;
12550 default:
12551 unallocated_encoding(s);
12552 return;
12553 }
12554
12555
12556 /* Check additional constraints for the scalar encoding */
12557 if (is_scalar) {
12558 if (!is_q) {
12559 unallocated_encoding(s);
12560 return;
12561 }
12562 /* FRINTxx is only in the vector form */
12563 if (only_in_vector) {
12564 unallocated_encoding(s);
12565 return;
12566 }
12567 }
12568
12569 if (!fp_access_check(s)) {
12570 return;
12571 }
12572
12573 if (rmode >= 0 || need_fpst) {
12574 tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
12575 }
12576
12577 if (rmode >= 0) {
12578 tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
12579 }
12580
12581 if (is_scalar) {
12582 TCGv_i32 tcg_op = read_fp_hreg(s, rn);
12583 TCGv_i32 tcg_res = tcg_temp_new_i32();
12584
12585 switch (fpop) {
12586 case 0x1a: /* FCVTNS */
12587 case 0x1b: /* FCVTMS */
12588 case 0x1c: /* FCVTAS */
12589 case 0x3a: /* FCVTPS */
12590 case 0x3b: /* FCVTZS */
12591 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12592 break;
12593 case 0x3d: /* FRECPE */
12594 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12595 break;
12596 case 0x3f: /* FRECPX */
12597 gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
12598 break;
12599 case 0x5a: /* FCVTNU */
12600 case 0x5b: /* FCVTMU */
12601 case 0x5c: /* FCVTAU */
12602 case 0x7a: /* FCVTPU */
12603 case 0x7b: /* FCVTZU */
12604 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12605 break;
12606 case 0x6f: /* FNEG */
12607 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12608 break;
12609 case 0x7d: /* FRSQRTE */
12610 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12611 break;
12612 default:
12613 g_assert_not_reached();
12614 }
12615
12616 /* limit any sign extension going on */
12617 tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
12618 write_fp_sreg(s, rd, tcg_res);
12619 } else {
12620 for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
12621 TCGv_i32 tcg_op = tcg_temp_new_i32();
12622 TCGv_i32 tcg_res = tcg_temp_new_i32();
12623
12624 read_vec_element_i32(s, tcg_op, rn, pass, MO_16);
12625
12626 switch (fpop) {
12627 case 0x1a: /* FCVTNS */
12628 case 0x1b: /* FCVTMS */
12629 case 0x1c: /* FCVTAS */
12630 case 0x3a: /* FCVTPS */
12631 case 0x3b: /* FCVTZS */
12632 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12633 break;
12634 case 0x3d: /* FRECPE */
12635 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12636 break;
12637 case 0x5a: /* FCVTNU */
12638 case 0x5b: /* FCVTMU */
12639 case 0x5c: /* FCVTAU */
12640 case 0x7a: /* FCVTPU */
12641 case 0x7b: /* FCVTZU */
12642 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12643 break;
12644 case 0x18: /* FRINTN */
12645 case 0x19: /* FRINTM */
12646 case 0x38: /* FRINTP */
12647 case 0x39: /* FRINTZ */
12648 case 0x58: /* FRINTA */
12649 case 0x79: /* FRINTI */
12650 gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
12651 break;
12652 case 0x59: /* FRINTX */
12653 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
12654 break;
12655 case 0x2f: /* FABS */
12656 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
12657 break;
12658 case 0x6f: /* FNEG */
12659 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12660 break;
12661 case 0x7d: /* FRSQRTE */
12662 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12663 break;
12664 case 0x7f: /* FSQRT */
12665 gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
12666 break;
12667 default:
12668 g_assert_not_reached();
12669 }
12670
12671 write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
12672 }
12673
12674 clear_vec_high(s, is_q, rd);
12675 }
12676
12677 if (tcg_rmode) {
12678 gen_restore_rmode(tcg_rmode, tcg_fpstatus);
12679 }
12680 }
12681
12682 /* AdvSIMD scalar x indexed element
12683 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
12684 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12685 * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
12686 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12687 * AdvSIMD vector x indexed element
12688 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
12689 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12690 * | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
12691 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12692 */
12693 static void disas_simd_indexed(DisasContext *s, uint32_t insn)
12694 {
12695 /* This encoding has two kinds of instruction:
12696 * normal, where we perform elt x idxelt => elt for each
12697 * element in the vector
12698 * long, where we perform elt x idxelt and generate a result of
12699 * double the width of the input element
12700 * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
12701 */
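/* For example MUL Vd.4S, Vn.4S, Vm.S[1] is a "normal" op, while
 * SMULL Vd.2D, Vn.2S, Vm.S[1] is a "long" op (32x32->64).
 */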
12702 bool is_scalar = extract32(insn, 28, 1);
12703 bool is_q = extract32(insn, 30, 1);
12704 bool u = extract32(insn, 29, 1);
12705 int size = extract32(insn, 22, 2);
12706 int l = extract32(insn, 21, 1);
12707 int m = extract32(insn, 20, 1);
12708 /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
12709 int rm = extract32(insn, 16, 4);
12710 int opcode = extract32(insn, 12, 4);
12711 int h = extract32(insn, 11, 1);
12712 int rn = extract32(insn, 5, 5);
12713 int rd = extract32(insn, 0, 5);
12714 bool is_long = false;
12715 int is_fp = 0;
12716 bool is_fp16 = false;
12717 int index;
12718 TCGv_ptr fpst;
12719
12720 switch (16 * u + opcode) {
12721 case 0x08: /* MUL */
12722 case 0x10: /* MLA */
12723 case 0x14: /* MLS */
12724 if (is_scalar) {
12725 unallocated_encoding(s);
12726 return;
12727 }
12728 break;
12729 case 0x02: /* SMLAL, SMLAL2 */
12730 case 0x12: /* UMLAL, UMLAL2 */
12731 case 0x06: /* SMLSL, SMLSL2 */
12732 case 0x16: /* UMLSL, UMLSL2 */
12733 case 0x0a: /* SMULL, SMULL2 */
12734 case 0x1a: /* UMULL, UMULL2 */
12735 if (is_scalar) {
12736 unallocated_encoding(s);
12737 return;
12738 }
12739 is_long = true;
12740 break;
12741 case 0x03: /* SQDMLAL, SQDMLAL2 */
12742 case 0x07: /* SQDMLSL, SQDMLSL2 */
12743 case 0x0b: /* SQDMULL, SQDMULL2 */
12744 is_long = true;
12745 break;
12746 case 0x0c: /* SQDMULH */
12747 case 0x0d: /* SQRDMULH */
12748 break;
12749 case 0x01: /* FMLA */
12750 case 0x05: /* FMLS */
12751 case 0x09: /* FMUL */
12752 case 0x19: /* FMULX */
12753 is_fp = 1;
12754 break;
12755 case 0x1d: /* SQRDMLAH */
12756 case 0x1f: /* SQRDMLSH */
12757 if (!dc_isar_feature(aa64_rdm, s)) {
12758 unallocated_encoding(s);
12759 return;
12760 }
12761 break;
12762 case 0x0e: /* SDOT */
12763 case 0x1e: /* UDOT */
12764 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
12765 unallocated_encoding(s);
12766 return;
12767 }
12768 break;
12769 case 0x0f:
12770 switch (size) {
12771 case 0: /* SUDOT */
12772 case 2: /* USDOT */
12773 if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
12774 unallocated_encoding(s);
12775 return;
12776 }
12777 size = MO_32;
12778 break;
12779 case 1: /* BFDOT */
12780 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
12781 unallocated_encoding(s);
12782 return;
12783 }
12784 size = MO_32;
12785 break;
12786 case 3: /* BFMLAL{B,T} */
12787 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
12788 unallocated_encoding(s);
12789 return;
12790 }
12791 /* Don't set is_fp here: that would engage size checks below
12792 that are wrong for BFMLAL. */
12792 size = MO_16;
12793 break;
12794 default:
12795 unallocated_encoding(s);
12796 return;
12797 }
12798 break;
12799 case 0x11: /* FCMLA #0 */
12800 case 0x13: /* FCMLA #90 */
12801 case 0x15: /* FCMLA #180 */
12802 case 0x17: /* FCMLA #270 */
12803 if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
12804 unallocated_encoding(s);
12805 return;
12806 }
12807 is_fp = 2;
12808 break;
12809 case 0x00: /* FMLAL */
12810 case 0x04: /* FMLSL */
12811 case 0x18: /* FMLAL2 */
12812 case 0x1c: /* FMLSL2 */
12813 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
12814 unallocated_encoding(s);
12815 return;
12816 }
12817 size = MO_16;
12818 /* These are FP insns, but is_fp stays 0: the helper takes
12819 cpu_env rather than fp_status. */
12819 break;
12820 default:
12821 unallocated_encoding(s);
12822 return;
12823 }
12824
12825 switch (is_fp) {
12826 case 1: /* normal fp */
12827 /* convert insn encoded size to MemOp size */
12828 switch (size) {
12829 case 0: /* half-precision */
12830 size = MO_16;
12831 is_fp16 = true;
12832 break;
12833 case MO_32: /* single precision */
12834 case MO_64: /* double precision */
12835 break;
12836 default:
12837 unallocated_encoding(s);
12838 return;
12839 }
12840 break;
12841
12842 case 2: /* complex fp */
12843 /* Each indexable element is a complex pair. */
12844 size += 1;
12845 switch (size) {
12846 case MO_32:
12847 if (h && !is_q) {
12848 unallocated_encoding(s);
12849 return;
12850 }
12851 is_fp16 = true;
12852 break;
12853 case MO_64:
12854 break;
12855 default:
12856 unallocated_encoding(s);
12857 return;
12858 }
12859 break;
12860
12861 default: /* integer */
12862 switch (size) {
12863 case MO_8:
12864 case MO_64:
12865 unallocated_encoding(s);
12866 return;
12867 }
12868 break;
12869 }
12870 if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
12871 unallocated_encoding(s);
12872 return;
12873 }
12874
12875 /* Given MemOp size, adjust register and indexing. */
12876 switch (size) {
12877 case MO_16:
12878 index = h << 2 | l << 1 | m;
12879 break;
12880 case MO_32:
12881 index = h << 1 | l;
12882 rm |= m << 4;
12883 break;
12884 case MO_64:
12885 if (l || !is_q) {
12886 unallocated_encoding(s);
12887 return;
12888 }
12889 index = h;
12890 rm |= m << 4;
12891 break;
12892 default:
12893 g_assert_not_reached();
12894 }
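/* For MO_16 the element index is H:L:M (0..7) and Rm remains a 4-bit
 * field (V0-V15); for MO_32 and MO_64 the M bit instead extends Rm to
 * a 5-bit register number.
 */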
12895
12896 if (!fp_access_check(s)) {
12897 return;
12898 }
12899
12900 if (is_fp) {
12901 fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
12902 } else {
12903 fpst = NULL;
12904 }
12905
12906 switch (16 * u + opcode) {
12907 case 0x0e: /* SDOT */
12908 case 0x1e: /* UDOT */
12909 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12910 u ? gen_helper_gvec_udot_idx_b
12911 : gen_helper_gvec_sdot_idx_b);
12912 return;
12913 case 0x0f:
12914 switch (extract32(insn, 22, 2)) {
12915 case 0: /* SUDOT */
12916 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12917 gen_helper_gvec_sudot_idx_b);
12918 return;
12919 case 1: /* BFDOT */
12920 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12921 gen_helper_gvec_bfdot_idx);
12922 return;
12923 case 2: /* USDOT */
12924 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12925 gen_helper_gvec_usdot_idx_b);
12926 return;
12927 case 3: /* BFMLAL{B,T} */
12928 gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
12929 gen_helper_gvec_bfmlal_idx);
12930 return;
12931 }
12932 g_assert_not_reached();
12933 case 0x11: /* FCMLA #0 */
12934 case 0x13: /* FCMLA #90 */
12935 case 0x15: /* FCMLA #180 */
12936 case 0x17: /* FCMLA #270 */
12937 {
12938 int rot = extract32(insn, 13, 2);
12939 int data = (index << 2) | rot;
12940 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
12941 vec_full_reg_offset(s, rn),
12942 vec_full_reg_offset(s, rm),
12943 vec_full_reg_offset(s, rd), fpst,
12944 is_q ? 16 : 8, vec_full_reg_size(s), data,
12945 size == MO_64
12946 ? gen_helper_gvec_fcmlas_idx
12947 : gen_helper_gvec_fcmlah_idx);
12948 }
12949 return;
12950
12951 case 0x00: /* FMLAL */
12952 case 0x04: /* FMLSL */
12953 case 0x18: /* FMLAL2 */
12954 case 0x1c: /* FMLSL2 */
12955 {
12956 int is_s = extract32(opcode, 2, 1);
12957 int is_2 = u;
12958 int data = (index << 2) | (is_2 << 1) | is_s;
12959 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
12960 vec_full_reg_offset(s, rn),
12961 vec_full_reg_offset(s, rm), cpu_env,
12962 is_q ? 16 : 8, vec_full_reg_size(s),
12963 data, gen_helper_gvec_fmlal_idx_a64);
12964 }
12965 return;
12966
12967 case 0x08: /* MUL */
12968 if (!is_long && !is_scalar) {
12969 static gen_helper_gvec_3 * const fns[3] = {
12970 gen_helper_gvec_mul_idx_h,
12971 gen_helper_gvec_mul_idx_s,
12972 gen_helper_gvec_mul_idx_d,
12973 };
12974 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
12975 vec_full_reg_offset(s, rn),
12976 vec_full_reg_offset(s, rm),
12977 is_q ? 16 : 8, vec_full_reg_size(s),
12978 index, fns[size - 1]);
12979 return;
12980 }
12981 break;
12982
12983 case 0x10: /* MLA */
12984 if (!is_long && !is_scalar) {
12985 static gen_helper_gvec_4 * const fns[3] = {
12986 gen_helper_gvec_mla_idx_h,
12987 gen_helper_gvec_mla_idx_s,
12988 gen_helper_gvec_mla_idx_d,
12989 };
12990 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
12991 vec_full_reg_offset(s, rn),
12992 vec_full_reg_offset(s, rm),
12993 vec_full_reg_offset(s, rd),
12994 is_q ? 16 : 8, vec_full_reg_size(s),
12995 index, fns[size - 1]);
12996 return;
12997 }
12998 break;
12999
13000 case 0x14: /* MLS */
13001 if (!is_long && !is_scalar) {
13002 static gen_helper_gvec_4 * const fns[3] = {
13003 gen_helper_gvec_mls_idx_h,
13004 gen_helper_gvec_mls_idx_s,
13005 gen_helper_gvec_mls_idx_d,
13006 };
13007 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
13008 vec_full_reg_offset(s, rn),
13009 vec_full_reg_offset(s, rm),
13010 vec_full_reg_offset(s, rd),
13011 is_q ? 16 : 8, vec_full_reg_size(s),
13012 index, fns[size - 1]);
13013 return;
13014 }
13015 break;
13016 }
13017
13018 if (size == 3) {
13019 TCGv_i64 tcg_idx = tcg_temp_new_i64();
13020 int pass;
13021
13022 assert(is_fp && is_q && !is_long);
13023
13024 read_vec_element(s, tcg_idx, rm, index, MO_64);
13025
13026 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13027 TCGv_i64 tcg_op = tcg_temp_new_i64();
13028 TCGv_i64 tcg_res = tcg_temp_new_i64();
13029
13030 read_vec_element(s, tcg_op, rn, pass, MO_64);
13031
13032 switch (16 * u + opcode) {
13033 case 0x05: /* FMLS */
13034 /* As usual for ARM, separate negation for fused multiply-add */
13035 gen_helper_vfp_negd(tcg_op, tcg_op);
13036 /* fall through */
13037 case 0x01: /* FMLA */
13038 read_vec_element(s, tcg_res, rd, pass, MO_64);
13039 gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
13040 break;
13041 case 0x09: /* FMUL */
13042 gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
13043 break;
13044 case 0x19: /* FMULX */
13045 gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
13046 break;
13047 default:
13048 g_assert_not_reached();
13049 }
13050
13051 write_vec_element(s, tcg_res, rd, pass, MO_64);
13052 }
13053
13054 clear_vec_high(s, !is_scalar, rd);
13055 } else if (!is_long) {
13056 /* 32 bit floating point, or 16 or 32 bit integer.
13057 * For the 16 bit scalar case we use the usual Neon helpers and
13058 * rely on the fact that 0 op 0 == 0 with no side effects.
13059 */
13060 TCGv_i32 tcg_idx = tcg_temp_new_i32();
13061 int pass, maxpasses;
13062
13063 if (is_scalar) {
13064 maxpasses = 1;
13065 } else {
13066 maxpasses = is_q ? 4 : 2;
13067 }
13068
13069 read_vec_element_i32(s, tcg_idx, rm, index, size);
13070
13071 if (size == 1 && !is_scalar) {
13072 /* The simplest way to handle the 16x16 indexed ops is to duplicate
13073 * the index into both halves of the 32 bit tcg_idx and then use
13074 * the usual Neon helpers.
13075 */
13076 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13077 }
13078
13079 for (pass = 0; pass < maxpasses; pass++) {
13080 TCGv_i32 tcg_op = tcg_temp_new_i32();
13081 TCGv_i32 tcg_res = tcg_temp_new_i32();
13082
13083 read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
13084
13085 switch (16 * u + opcode) {
13086 case 0x08: /* MUL */
13087 case 0x10: /* MLA */
13088 case 0x14: /* MLS */
13089 {
13090 static NeonGenTwoOpFn * const fns[2][2] = {
13091 { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
13092 { tcg_gen_add_i32, tcg_gen_sub_i32 },
13093 };
13094 NeonGenTwoOpFn *genfn;
13095 bool is_sub = opcode == 0x4;
13096
13097 if (size == 1) {
13098 gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
13099 } else {
13100 tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
13101 }
13102 if (opcode == 0x8) {
13103 break;
13104 }
13105 read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
13106 genfn = fns[size - 1][is_sub];
13107 genfn(tcg_res, tcg_op, tcg_res);
13108 break;
13109 }
13110 case 0x05: /* FMLS */
13111 case 0x01: /* FMLA */
13112 read_vec_element_i32(s, tcg_res, rd, pass,
13113 is_scalar ? size : MO_32);
13114 switch (size) {
13115 case 1:
13116 if (opcode == 0x5) {
13117 /* As usual for ARM, separate negation for fused
13118 * multiply-add */
13119 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
13120 }
13121 if (is_scalar) {
13122 gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
13123 tcg_res, fpst);
13124 } else {
13125 gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
13126 tcg_res, fpst);
13127 }
13128 break;
13129 case 2:
13130 if (opcode == 0x5) {
13131 /* As usual for ARM, separate negation for
13132 * fused multiply-add */
13133 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
13134 }
13135 gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
13136 tcg_res, fpst);
13137 break;
13138 default:
13139 g_assert_not_reached();
13140 }
13141 break;
13142 case 0x09: /* FMUL */
13143 switch (size) {
13144 case 1:
13145 if (is_scalar) {
13146 gen_helper_advsimd_mulh(tcg_res, tcg_op,
13147 tcg_idx, fpst);
13148 } else {
13149 gen_helper_advsimd_mul2h(tcg_res, tcg_op,
13150 tcg_idx, fpst);
13151 }
13152 break;
13153 case 2:
13154 gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
13155 break;
13156 default:
13157 g_assert_not_reached();
13158 }
13159 break;
13160 case 0x19: /* FMULX */
13161 switch (size) {
13162 case 1:
13163 if (is_scalar) {
13164 gen_helper_advsimd_mulxh(tcg_res, tcg_op,
13165 tcg_idx, fpst);
13166 } else {
13167 gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
13168 tcg_idx, fpst);
13169 }
13170 break;
13171 case 2:
13172 gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
13173 break;
13174 default:
13175 g_assert_not_reached();
13176 }
13177 break;
13178 case 0x0c: /* SQDMULH */
13179 if (size == 1) {
13180 gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
13181 tcg_op, tcg_idx);
13182 } else {
13183 gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
13184 tcg_op, tcg_idx);
13185 }
13186 break;
13187 case 0x0d: /* SQRDMULH */
13188 if (size == 1) {
13189 gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
13190 tcg_op, tcg_idx);
13191 } else {
13192 gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
13193 tcg_op, tcg_idx);
13194 }
13195 break;
13196 case 0x1d: /* SQRDMLAH */
13197 read_vec_element_i32(s, tcg_res, rd, pass,
13198 is_scalar ? size : MO_32);
13199 if (size == 1) {
13200 gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
13201 tcg_op, tcg_idx, tcg_res);
13202 } else {
13203 gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
13204 tcg_op, tcg_idx, tcg_res);
13205 }
13206 break;
13207 case 0x1f: /* SQRDMLSH */
13208 read_vec_element_i32(s, tcg_res, rd, pass,
13209 is_scalar ? size : MO_32);
13210 if (size == 1) {
13211 gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
13212 tcg_op, tcg_idx, tcg_res);
13213 } else {
13214 gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
13215 tcg_op, tcg_idx, tcg_res);
13216 }
13217 break;
13218 default:
13219 g_assert_not_reached();
13220 }
13221
13222 if (is_scalar) {
13223 write_fp_sreg(s, rd, tcg_res);
13224 } else {
13225 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
13226 }
13227 }
13228
13229 clear_vec_high(s, is_q, rd);
13230 } else {
13231 /* long ops: 16x16->32 or 32x32->64 */
13232 TCGv_i64 tcg_res[2];
13233 int pass;
13234 bool satop = extract32(opcode, 0, 1);
13235 MemOp memop = MO_32;
13236
13237 if (satop || !u) {
13238 memop |= MO_SIGN;
13239 }
13240
13241 if (size == 2) {
13242 TCGv_i64 tcg_idx = tcg_temp_new_i64();
13243
13244 read_vec_element(s, tcg_idx, rm, index, memop);
13245
13246 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13247 TCGv_i64 tcg_op = tcg_temp_new_i64();
13248 TCGv_i64 tcg_passres;
13249 int passelt;
13250
13251 if (is_scalar) {
13252 passelt = 0;
13253 } else {
13254 passelt = pass + (is_q * 2);
13255 }
13256
13257 read_vec_element(s, tcg_op, rn, passelt, memop);
13258
13259 tcg_res[pass] = tcg_temp_new_i64();
13260
13261 if (opcode == 0xa || opcode == 0xb) {
13262 /* Non-accumulating ops */
13263 tcg_passres = tcg_res[pass];
13264 } else {
13265 tcg_passres = tcg_temp_new_i64();
13266 }
13267
13268 tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
13269
13270 if (satop) {
13271 /* saturating, doubling */
13272 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
13273 tcg_passres, tcg_passres);
13274 }
13275
13276 if (opcode == 0xa || opcode == 0xb) {
13277 continue;
13278 }
13279
13280 /* Accumulating op: handle accumulate step */
13281 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13282
13283 switch (opcode) {
13284 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13285 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13286 break;
13287 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13288 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13289 break;
13290 case 0x7: /* SQDMLSL, SQDMLSL2 */
13291 tcg_gen_neg_i64(tcg_passres, tcg_passres);
13292 /* fall through */
13293 case 0x3: /* SQDMLAL, SQDMLAL2 */
13294 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
13295 tcg_res[pass],
13296 tcg_passres);
13297 break;
13298 default:
13299 g_assert_not_reached();
13300 }
13301 }
13302
13303 clear_vec_high(s, !is_scalar, rd);
13304 } else {
13305 TCGv_i32 tcg_idx = tcg_temp_new_i32();
13306
13307 assert(size == 1);
13308 read_vec_element_i32(s, tcg_idx, rm, index, size);
13309
13310 if (!is_scalar) {
13311 /* The simplest way to handle the 16x16 indexed ops is to
13312 * duplicate the index into both halves of the 32 bit tcg_idx
13313 * and then use the usual Neon helpers.
13314 */
13315 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13316 }
13317
13318 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13319 TCGv_i32 tcg_op = tcg_temp_new_i32();
13320 TCGv_i64 tcg_passres;
13321
13322 if (is_scalar) {
13323 read_vec_element_i32(s, tcg_op, rn, pass, size);
13324 } else {
13325 read_vec_element_i32(s, tcg_op, rn,
13326 pass + (is_q * 2), MO_32);
13327 }
13328
13329 tcg_res[pass] = tcg_temp_new_i64();
13330
13331 if (opcode == 0xa || opcode == 0xb) {
13332 /* Non-accumulating ops */
13333 tcg_passres = tcg_res[pass];
13334 } else {
13335 tcg_passres = tcg_temp_new_i64();
13336 }
13337
13338 if (memop & MO_SIGN) {
13339 gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
13340 } else {
13341 gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
13342 }
13343 if (satop) {
13344 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
13345 tcg_passres, tcg_passres);
13346 }
13347
13348 if (opcode == 0xa || opcode == 0xb) {
13349 continue;
13350 }
13351
13352 /* Accumulating op: handle accumulate step */
13353 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13354
13355 switch (opcode) {
13356 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13357 gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
13358 tcg_passres);
13359 break;
13360 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13361 gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
13362 tcg_passres);
13363 break;
13364 case 0x7: /* SQDMLSL, SQDMLSL2 */
13365 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
13366 /* fall through */
13367 case 0x3: /* SQDMLAL, SQDMLAL2 */
13368 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
13369 tcg_res[pass],
13370 tcg_passres);
13371 break;
13372 default:
13373 g_assert_not_reached();
13374 }
13375 }
13376
13377 if (is_scalar) {
13378 tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
13379 }
13380 }
13381
13382 if (is_scalar) {
13383 tcg_res[1] = tcg_constant_i64(0);
13384 }
13385
13386 for (pass = 0; pass < 2; pass++) {
13387 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13388 }
13389 }
13390 }
13391
13392 /* Crypto AES
13393 * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
13394 * +-----------------+------+-----------+--------+-----+------+------+
13395 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
13396 * +-----------------+------+-----------+--------+-----+------+------+
13397 */
13398 static void disas_crypto_aes(DisasContext *s, uint32_t insn)
13399 {
13400 int size = extract32(insn, 22, 2);
13401 int opcode = extract32(insn, 12, 5);
13402 int rn = extract32(insn, 5, 5);
13403 int rd = extract32(insn, 0, 5);
13404 int decrypt;
13405 gen_helper_gvec_2 *genfn2 = NULL;
13406 gen_helper_gvec_3 *genfn3 = NULL;
13407
13408 if (!dc_isar_feature(aa64_aes, s) || size != 0) {
13409 unallocated_encoding(s);
13410 return;
13411 }
13412
13413 switch (opcode) {
13414 case 0x4: /* AESE */
13415 decrypt = 0;
13416 genfn3 = gen_helper_crypto_aese;
13417 break;
13418 case 0x6: /* AESMC */
13419 decrypt = 0;
13420 genfn2 = gen_helper_crypto_aesmc;
13421 break;
13422 case 0x5: /* AESD */
13423 decrypt = 1;
13424 genfn3 = gen_helper_crypto_aese;
13425 break;
13426 case 0x7: /* AESIMC */
13427 decrypt = 1;
13428 genfn2 = gen_helper_crypto_aesmc;
13429 break;
13430 default:
13431 unallocated_encoding(s);
13432 return;
13433 }
13434
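/* AESD and AESIMC reuse the AESE/AESMC helpers; the decrypt flag is
 * passed through to the helper as the gvec data argument.
 */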
13435 if (!fp_access_check(s)) {
13436 return;
13437 }
13438 if (genfn2) {
13439 gen_gvec_op2_ool(s, true, rd, rn, decrypt, genfn2);
13440 } else {
13441 gen_gvec_op3_ool(s, true, rd, rd, rn, decrypt, genfn3);
13442 }
13443 }
13444
13445 /* Crypto three-reg SHA
13446 * 31 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
13447 * +-----------------+------+---+------+---+--------+-----+------+------+
13448 * | 0 1 0 1 1 1 1 0 | size | 0 | Rm | 0 | opcode | 0 0 | Rn | Rd |
13449 * +-----------------+------+---+------+---+--------+-----+------+------+
13450 */
13451 static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
13452 {
13453 int size = extract32(insn, 22, 2);
13454 int opcode = extract32(insn, 12, 3);
13455 int rm = extract32(insn, 16, 5);
13456 int rn = extract32(insn, 5, 5);
13457 int rd = extract32(insn, 0, 5);
13458 gen_helper_gvec_3 *genfn;
13459 bool feature;
13460
13461 if (size != 0) {
13462 unallocated_encoding(s);
13463 return;
13464 }
13465
13466 switch (opcode) {
13467 case 0: /* SHA1C */
13468 genfn = gen_helper_crypto_sha1c;
13469 feature = dc_isar_feature(aa64_sha1, s);
13470 break;
13471 case 1: /* SHA1P */
13472 genfn = gen_helper_crypto_sha1p;
13473 feature = dc_isar_feature(aa64_sha1, s);
13474 break;
13475 case 2: /* SHA1M */
13476 genfn = gen_helper_crypto_sha1m;
13477 feature = dc_isar_feature(aa64_sha1, s);
13478 break;
13479 case 3: /* SHA1SU0 */
13480 genfn = gen_helper_crypto_sha1su0;
13481 feature = dc_isar_feature(aa64_sha1, s);
13482 break;
13483 case 4: /* SHA256H */
13484 genfn = gen_helper_crypto_sha256h;
13485 feature = dc_isar_feature(aa64_sha256, s);
13486 break;
13487 case 5: /* SHA256H2 */
13488 genfn = gen_helper_crypto_sha256h2;
13489 feature = dc_isar_feature(aa64_sha256, s);
13490 break;
13491 case 6: /* SHA256SU1 */
13492 genfn = gen_helper_crypto_sha256su1;
13493 feature = dc_isar_feature(aa64_sha256, s);
13494 break;
13495 default:
13496 unallocated_encoding(s);
13497 return;
13498 }
13499
13500 if (!feature) {
13501 unallocated_encoding(s);
13502 return;
13503 }
13504
13505 if (!fp_access_check(s)) {
13506 return;
13507 }
13508 gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
13509 }
13510
13511 /* Crypto two-reg SHA
13512 * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
13513 * +-----------------+------+-----------+--------+-----+------+------+
13514 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
13515 * +-----------------+------+-----------+--------+-----+------+------+
13516 */
13517 static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
13518 {
13519 int size = extract32(insn, 22, 2);
13520 int opcode = extract32(insn, 12, 5);
13521 int rn = extract32(insn, 5, 5);
13522 int rd = extract32(insn, 0, 5);
13523 gen_helper_gvec_2 *genfn;
13524 bool feature;
13525
13526 if (size != 0) {
13527 unallocated_encoding(s);
13528 return;
13529 }
13530
13531 switch (opcode) {
13532 case 0: /* SHA1H */
13533 feature = dc_isar_feature(aa64_sha1, s);
13534 genfn = gen_helper_crypto_sha1h;
13535 break;
13536 case 1: /* SHA1SU1 */
13537 feature = dc_isar_feature(aa64_sha1, s);
13538 genfn = gen_helper_crypto_sha1su1;
13539 break;
13540 case 2: /* SHA256SU0 */
13541 feature = dc_isar_feature(aa64_sha256, s);
13542 genfn = gen_helper_crypto_sha256su0;
13543 break;
13544 default:
13545 unallocated_encoding(s);
13546 return;
13547 }
13548
13549 if (!feature) {
13550 unallocated_encoding(s);
13551 return;
13552 }
13553
13554 if (!fp_access_check(s)) {
13555 return;
13556 }
13557 gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
13558 }
13559
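/* RAX1: rotate each 64-bit element of Vm left by one and XOR it with
 * the corresponding element of Vn.
 */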
13560 static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
13561 {
13562 tcg_gen_rotli_i64(d, m, 1);
13563 tcg_gen_xor_i64(d, d, n);
13564 }
13565
13566 static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
13567 {
13568 tcg_gen_rotli_vec(vece, d, m, 1);
13569 tcg_gen_xor_vec(vece, d, d, n);
13570 }
13571
13572 void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
13573 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
13574 {
13575 static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
13576 static const GVecGen3 op = {
13577 .fni8 = gen_rax1_i64,
13578 .fniv = gen_rax1_vec,
13579 .opt_opc = vecop_list,
13580 .fno = gen_helper_crypto_rax1,
13581 .vece = MO_64,
13582 };
13583 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
13584 }
13585
13586 /* Crypto three-reg SHA512
13587 * 31 21 20 16 15 14 13 12 11 10 9 5 4 0
13588 * +-----------------------+------+---+---+-----+--------+------+------+
13589 * | 1 1 0 0 1 1 1 0 0 1 1 | Rm | 1 | O | 0 0 | opcode | Rn | Rd |
13590 * +-----------------------+------+---+---+-----+--------+------+------+
13591 */
13592 static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
13593 {
13594 int opcode = extract32(insn, 10, 2);
13595 int o = extract32(insn, 14, 1);
13596 int rm = extract32(insn, 16, 5);
13597 int rn = extract32(insn, 5, 5);
13598 int rd = extract32(insn, 0, 5);
13599 bool feature;
13600 gen_helper_gvec_3 *oolfn = NULL;
13601 GVecGen3Fn *gvecfn = NULL;
13602
13603 if (o == 0) {
13604 switch (opcode) {
13605 case 0: /* SHA512H */
13606 feature = dc_isar_feature(aa64_sha512, s);
13607 oolfn = gen_helper_crypto_sha512h;
13608 break;
13609 case 1: /* SHA512H2 */
13610 feature = dc_isar_feature(aa64_sha512, s);
13611 oolfn = gen_helper_crypto_sha512h2;
13612 break;
13613 case 2: /* SHA512SU1 */
13614 feature = dc_isar_feature(aa64_sha512, s);
13615 oolfn = gen_helper_crypto_sha512su1;
13616 break;
13617 case 3: /* RAX1 */
13618 feature = dc_isar_feature(aa64_sha3, s);
13619 gvecfn = gen_gvec_rax1;
13620 break;
13621 default:
13622 g_assert_not_reached();
13623 }
13624 } else {
13625 switch (opcode) {
13626 case 0: /* SM3PARTW1 */
13627 feature = dc_isar_feature(aa64_sm3, s);
13628 oolfn = gen_helper_crypto_sm3partw1;
13629 break;
13630 case 1: /* SM3PARTW2 */
13631 feature = dc_isar_feature(aa64_sm3, s);
13632 oolfn = gen_helper_crypto_sm3partw2;
13633 break;
13634 case 2: /* SM4EKEY */
13635 feature = dc_isar_feature(aa64_sm4, s);
13636 oolfn = gen_helper_crypto_sm4ekey;
13637 break;
13638 default:
13639 unallocated_encoding(s);
13640 return;
13641 }
13642 }
13643
13644 if (!feature) {
13645 unallocated_encoding(s);
13646 return;
13647 }
13648
13649 if (!fp_access_check(s)) {
13650 return;
13651 }
13652
13653 if (oolfn) {
13654 gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
13655 } else {
13656 gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
13657 }
13658 }
13659
13660 /* Crypto two-reg SHA512
13661 * 31 12 11 10 9 5 4 0
13662 * +-----------------------------------------+--------+------+------+
13663 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode | Rn | Rd |
13664 * +-----------------------------------------+--------+------+------+
13665 */
13666 static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
13667 {
13668 int opcode = extract32(insn, 10, 2);
13669 int rn = extract32(insn, 5, 5);
13670 int rd = extract32(insn, 0, 5);
13671 bool feature;
13672
13673 switch (opcode) {
13674 case 0: /* SHA512SU0 */
13675 feature = dc_isar_feature(aa64_sha512, s);
13676 break;
13677 case 1: /* SM4E */
13678 feature = dc_isar_feature(aa64_sm4, s);
13679 break;
13680 default:
13681 unallocated_encoding(s);
13682 return;
13683 }
13684
13685 if (!feature) {
13686 unallocated_encoding(s);
13687 return;
13688 }
13689
13690 if (!fp_access_check(s)) {
13691 return;
13692 }
13693
13694 switch (opcode) {
13695 case 0: /* SHA512SU0 */
13696 gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
13697 break;
13698 case 1: /* SM4E */
13699 gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
13700 break;
13701 default:
13702 g_assert_not_reached();
13703 }
13704 }
13705
13706 /* Crypto four-register
13707 * 31 23 22 21 20 16 15 14 10 9 5 4 0
13708 * +-------------------+-----+------+---+------+------+------+
13709 * | 1 1 0 0 1 1 1 0 0 | Op0 | Rm | 0 | Ra | Rn | Rd |
13710 * +-------------------+-----+------+---+------+------+------+
13711 */
13712 static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
13713 {
13714 int op0 = extract32(insn, 21, 2);
13715 int rm = extract32(insn, 16, 5);
13716 int ra = extract32(insn, 10, 5);
13717 int rn = extract32(insn, 5, 5);
13718 int rd = extract32(insn, 0, 5);
13719 bool feature;
13720
13721 switch (op0) {
13722 case 0: /* EOR3 */
13723 case 1: /* BCAX */
13724 feature = dc_isar_feature(aa64_sha3, s);
13725 break;
13726 case 2: /* SM3SS1 */
13727 feature = dc_isar_feature(aa64_sm3, s);
13728 break;
13729 default:
13730 unallocated_encoding(s);
13731 return;
13732 }
13733
13734 if (!feature) {
13735 unallocated_encoding(s);
13736 return;
13737 }
13738
13739 if (!fp_access_check(s)) {
13740 return;
13741 }
13742
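/* Per 64-bit pass below, EOR3 computes Vn ^ Vm ^ Va and BCAX computes
 * Vn ^ (Vm & ~Va).
 */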
13743 if (op0 < 2) {
13744 TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
13745 int pass;
13746
13747 tcg_op1 = tcg_temp_new_i64();
13748 tcg_op2 = tcg_temp_new_i64();
13749 tcg_op3 = tcg_temp_new_i64();
13750 tcg_res[0] = tcg_temp_new_i64();
13751 tcg_res[1] = tcg_temp_new_i64();
13752
13753 for (pass = 0; pass < 2; pass++) {
13754 read_vec_element(s, tcg_op1, rn, pass, MO_64);
13755 read_vec_element(s, tcg_op2, rm, pass, MO_64);
13756 read_vec_element(s, tcg_op3, ra, pass, MO_64);
13757
13758 if (op0 == 0) {
13759 /* EOR3 */
13760 tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
13761 } else {
13762 /* BCAX */
13763 tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
13764 }
13765 tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
13766 }
13767 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
13768 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
13769 } else {
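/*
 * SM3SS1 uses only lane 3 of each source: the result is
 *   ROL(ROL(Vn.S[3], 12) + Vm.S[3] + Va.S[3], 7)
 * written to lane 3 of Vd, with lanes 0-2 cleared. The rotate-lefts
 * are implemented below as 32-bit rotate-rights by 20 and 25.
 */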
13770 TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;
13771
13772 tcg_op1 = tcg_temp_new_i32();
13773 tcg_op2 = tcg_temp_new_i32();
13774 tcg_op3 = tcg_temp_new_i32();
13775 tcg_res = tcg_temp_new_i32();
13776 tcg_zero = tcg_constant_i32(0);
13777
13778 read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
13779 read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
13780 read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);
13781
13782 tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
13783 tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
13784 tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
13785 tcg_gen_rotri_i32(tcg_res, tcg_res, 25);
13786
13787 write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
13788 write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
13789 write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
13790 write_vec_element_i32(s, tcg_res, rd, 3, MO_32);
13791 }
13792 }
13793
13794 /* Crypto XAR
13795 * 31 21 20 16 15 10 9 5 4 0
13796 * +-----------------------+------+--------+------+------+
13797 * | 1 1 0 0 1 1 1 0 1 0 0 | Rm | imm6 | Rn | Rd |
13798 * +-----------------------+------+--------+------+------+
13799 */
13800 static void disas_crypto_xar(DisasContext *s, uint32_t insn)
13801 {
13802 int rm = extract32(insn, 16, 5);
13803 int imm6 = extract32(insn, 10, 6);
13804 int rn = extract32(insn, 5, 5);
13805 int rd = extract32(insn, 0, 5);
13806
13807 if (!dc_isar_feature(aa64_sha3, s)) {
13808 unallocated_encoding(s);
13809 return;
13810 }
13811
13812 if (!fp_access_check(s)) {
13813 return;
13814 }
13815
13816 gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
13817 vec_full_reg_offset(s, rn),
13818 vec_full_reg_offset(s, rm), imm6, 16,
13819 vec_full_reg_size(s));
13820 }
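/*
 * A minimal scalar model of the per-lane XAR semantics: each 64-bit
 * lane of Vd is ror64(Vn ^ Vm, imm6). xar64_scalar_model is a
 * hypothetical, illustrative helper; the decoder relies on the
 * vectorised gen_gvec_xar expansion above, which operates on the
 * 16-byte operation size and clears any bytes beyond it.
 */
static inline uint64_t xar64_scalar_model(uint64_t n, uint64_t m, unsigned imm6)
{
    /* Illustrative sketch; not referenced by the translator. */
    uint64_t x = n ^ m;

    imm6 &= 63;
    /* Rotate right by imm6, avoiding an undefined 64-bit shift when imm6 == 0. */
    return imm6 ? (x >> imm6) | (x << (64 - imm6)) : x;
}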
13821
13822 /* Crypto three-reg imm2
13823 * 31 21 20 16 15 14 13 12 11 10 9 5 4 0
13824 * +-----------------------+------+-----+------+--------+------+------+
13825 * | 1 1 0 0 1 1 1 0 0 1 0 | Rm | 1 0 | imm2 | opcode | Rn | Rd |
13826 * +-----------------------+------+-----+------+--------+------+------+
13827 */
13828 static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
13829 {
13830 static gen_helper_gvec_3 * const fns[4] = {
13831 gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
13832 gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
13833 };
13834 int opcode = extract32(insn, 10, 2);
13835 int imm2 = extract32(insn, 12, 2);
13836 int rm = extract32(insn, 16, 5);
13837 int rn = extract32(insn, 5, 5);
13838 int rd = extract32(insn, 0, 5);
13839
13840 if (!dc_isar_feature(aa64_sm3, s)) {
13841 unallocated_encoding(s);
13842 return;
13843 }
13844
13845 if (!fp_access_check(s)) {
13846 return;
13847 }
13848
13849 gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
13850 }
13851
13852 /* C3.6 Data processing - SIMD, inc Crypto
13853 *
13854 * As the decode gets a little complex we are using a table based
13855 * approach for this part of the decode.
13856 */
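/*
 * An entry matches when (insn & mask) == pattern; lookup_disas_fn
 * returns the first matching entry, so where patterns overlap the
 * more specific entry must come first (see simd_mod_imm below).
 */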
13857 static const AArch64DecodeTable data_proc_simd[] = {
13858 /* pattern , mask , fn */
13859 { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
13860 { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
13861 { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
13862 { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
13863 { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
13864 { 0x0e000400, 0x9fe08400, disas_simd_copy },
13865 { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
13866 /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
13867 { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
13868 { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
13869 { 0x0e000000, 0xbf208c00, disas_simd_tb },
13870 { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
13871 { 0x2e000000, 0xbf208400, disas_simd_ext },
13872 { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
13873 { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
13874 { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
13875 { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
13876 { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
13877 { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
13878 { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
13879 { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
13880 { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
13881 { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
13882 { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
13883 { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
13884 { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
13885 { 0xce000000, 0xff808000, disas_crypto_four_reg },
13886 { 0xce800000, 0xffe00000, disas_crypto_xar },
13887 { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
13888 { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
13889 { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
13890 { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
13891 { 0x00000000, 0x00000000, NULL }
13892 };
13893
13894 static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
13895 {
13896 /* Note that this is called with all the non-FP cases from
13897 * table C3-6, so it must UNDEF for entries not specifically
13898 * allocated to instructions in that table.
13899 */
13900 AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
13901 if (fn) {
13902 fn(s, insn);
13903 } else {
13904 unallocated_encoding(s);
13905 }
13906 }
13907
13908 /* C3.6 Data processing - SIMD and floating point */
13909 static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
13910 {
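/*
 * Bit 28 set with bit 30 clear selects the scalar floating-point
 * and FP/integer conversion half of this encoding space; everything
 * else is Advanced SIMD, including the crypto extensions.
 */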
13911 if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
13912 disas_data_proc_fp(s, insn);
13913 } else {
13914 /* SIMD, including crypto */
13915 disas_data_proc_simd(s, insn);
13916 }
13917 }
13918
13919 static bool trans_OK(DisasContext *s, arg_OK *a)
13920 {
13921 return true;
13922 }
13923
13924 static bool trans_FAIL(DisasContext *s, arg_OK *a)
13925 {
13926 s->is_nonstreaming = true;
13927 return true;
13928 }
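/*
 * trans_OK and trans_FAIL are the targets of the generated
 * decode-sme-fa64 decoder, invoked below when SME traps on
 * non-streaming insns are in force: insns routed to trans_FAIL are
 * marked non-streaming so that the subsequent access checks can
 * raise the appropriate SME exception.
 */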
13929
13930 /**
13931 * is_guarded_page:
13932 * @env: The cpu environment
13933 * @s: The DisasContext
13934 *
13935 * Return true if the page is guarded.
13936 */
13937 static bool is_guarded_page(CPUARMState *env, DisasContext *s)
13938 {
13939 uint64_t addr = s->base.pc_first;
13940 #ifdef CONFIG_USER_ONLY
13941 return page_get_flags(addr) & PAGE_BTI;
13942 #else
13943 CPUTLBEntryFull *full;
13944 void *host;
13945 int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
13946 int flags;
13947
13948 /*
13949 * We test this immediately after reading an insn, which means
13950 * that the TLB entry must be present and valid, and thus this
13951 * access will never raise an exception.
13952 */
13953 flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
13954 false, &host, &full, 0);
13955 assert(!(flags & TLB_INVALID_MASK));
13956
13957 return full->guarded;
13958 #endif
13959 }
13960
13961 /**
13962 * btype_destination_ok:
13963 * @insn: The instruction at the branch destination
13964 * @bt: SCTLR_ELx.BT
13965 * @btype: PSTATE.BTYPE, which is known to be non-zero
13966 *
13967 * On a guarded page, only a limited set of insns may be
13968 * present at the branch target:
13969 * - branch target identifiers (BTI),
13970 * - PACIASP, PACIBSP,
13971 * - BRK,
13972 * - HLT.
13973 * Anything else causes a Branch Target Exception.
13974 *
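* For a non-zero btype:
*   PACIASP/PACIBSP: ok, except btype == 3 when SCTLR_ELx.BT is set
*   BTI (no targets): never ok
*   BTI c:            ok, except btype == 3
*   BTI j:            ok, except btype == 2
*   BTI jc:           always ok
*   BRK/HLT:          ok, so that the breakpoint exception wins
*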
13975 * Return true if the branch is compatible, false to raise BTITRAP.
13976 */
13977 static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
13978 {
13979 if ((insn & 0xfffff01fu) == 0xd503201fu) {
13980 /* HINT space */
13981 switch (extract32(insn, 5, 7)) {
13982 case 0b011001: /* PACIASP */
13983 case 0b011011: /* PACIBSP */
13984 /*
13985 * If SCTLR_ELx.BT, then PACI*SP are not compatible
13986 * with btype == 3. Otherwise all btype are ok.
13987 */
13988 return !bt || btype != 3;
13989 case 0b100000: /* BTI */
13990 /* Not compatible with any btype. */
13991 return false;
13992 case 0b100010: /* BTI c */
13993 /* Not compatible with btype == 3 */
13994 return btype != 3;
13995 case 0b100100: /* BTI j */
13996 /* Not compatible with btype == 2 */
13997 return btype != 2;
13998 case 0b100110: /* BTI jc */
13999 /* Compatible with any btype. */
14000 return true;
14001 }
14002 } else {
14003 switch (insn & 0xffe0001fu) {
14004 case 0xd4200000u: /* BRK */
14005 case 0xd4400000u: /* HLT */
14006 /* Give priority to the breakpoint exception. */
14007 return true;
14008 }
14009 }
14010 return false;
14011 }
14012
14013 /* C3.1 A64 instruction index by encoding */
14014 static void disas_a64_legacy(DisasContext *s, uint32_t insn)
14015 {
14016 switch (extract32(insn, 25, 4)) {
14017 case 0xa: case 0xb: /* Branch, exception generation and system insns */
14018 disas_b_exc_sys(s, insn);
14019 break;
14020 case 0x4:
14021 case 0x6:
14022 case 0xc:
14023 case 0xe: /* Loads and stores */
14024 disas_ldst(s, insn);
14025 break;
14026 case 0x5:
14027 case 0xd: /* Data processing - register */
14028 disas_data_proc_reg(s, insn);
14029 break;
14030 case 0x7:
14031 case 0xf: /* Data processing - SIMD and floating point */
14032 disas_data_proc_simd_fp(s, insn);
14033 break;
14034 default:
14035 unallocated_encoding(s);
14036 break;
14037 }
14038 }
14039
14040 static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
14041 CPUState *cpu)
14042 {
14043 DisasContext *dc = container_of(dcbase, DisasContext, base);
14044 CPUARMState *env = cpu->env_ptr;
14045 ARMCPU *arm_cpu = env_archcpu(env);
14046 CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
14047 int bound, core_mmu_idx;
14048
14049 dc->isar = &arm_cpu->isar;
14050 dc->condjmp = 0;
14051 dc->pc_save = dc->base.pc_first;
14052 dc->aarch64 = true;
14053 dc->thumb = false;
14054 dc->sctlr_b = 0;
14055 dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
14056 dc->condexec_mask = 0;
14057 dc->condexec_cond = 0;
14058 core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
14059 dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
14060 dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
14061 dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
14062 dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
14063 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
14064 #if !defined(CONFIG_USER_ONLY)
14065 dc->user = (dc->current_el == 0);
14066 #endif
14067 dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
14068 dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
14069 dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
14070 dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
14071 dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
14072 dc->fgt_eret = EX_TBFLAG_A64(tb_flags, FGT_ERET);
14073 dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
14074 dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
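/*
 * The VL and SVL tb-flag fields hold (vector length in quadwords) - 1,
 * so dc->vl and dc->svl are the SVE and SME vector lengths in bytes.
 */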
14075 dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
14076 dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
14077 dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
14078 dc->bt = EX_TBFLAG_A64(tb_flags, BT);
14079 dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
14080 dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
14081 dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
14082 dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
14083 dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
14084 dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
14085 dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
14086 dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
14087 dc->vec_len = 0;
14088 dc->vec_stride = 0;
14089 dc->cp_regs = arm_cpu->cp_regs;
14090 dc->features = env->features;
14091 dc->dcz_blocksize = arm_cpu->dcz_blocksize;
14092
14093 #ifdef CONFIG_USER_ONLY
14094 /* In sve_probe_page, we assume TBI is enabled. */
14095 tcg_debug_assert(dc->tbid & 1);
14096 #endif
14097
14098 dc->lse2 = dc_isar_feature(aa64_lse2, dc);
14099
14100 /* Single step state. The code-generation logic here is:
14101 * SS_ACTIVE == 0:
14102 * generate code with no special handling for single-stepping (except
14103 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
14104 * this happens anyway because those changes are all system register or
14105 * PSTATE writes).
14106 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
14107 * emit code for one insn
14108 * emit code to clear PSTATE.SS
14109 * emit code to generate software step exception for completed step
14110 * end TB (as usual for having generated an exception)
14111 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
14112 * emit code to generate a software step exception
14113 * end the TB
14114 */
14115 dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
14116 dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
14117 dc->is_ldex = false;
14118
14119 /* Bound the number of insns to execute to those left on the page. */
14120 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
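/*
 * For example, with 4 KiB pages and pc_first == 0xffffaaaa0ff0,
 * (pc_first | TARGET_PAGE_MASK) == 0xfffffffffffffff0, whose
 * negation is 0x10: room for four insns up to the page boundary.
 */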
14121
14122 /* If architectural single step active, limit to 1. */
14123 if (dc->ss_active) {
14124 bound = 1;
14125 }
14126 dc->base.max_insns = MIN(dc->base.max_insns, bound);
14127 }
14128
14129 static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
14130 {
14131 }
14132
14133 static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
14134 {
14135 DisasContext *dc = container_of(dcbase, DisasContext, base);
14136 target_ulong pc_arg = dc->base.pc_next;
14137
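/*
 * With CF_PCREL the generated code is position independent, so only
 * the offset within the page is recorded here; the restore logic
 * combines it with the page base of the PC at exception time.
 */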
14138 if (tb_cflags(dcbase->tb) & CF_PCREL) {
14139 pc_arg &= ~TARGET_PAGE_MASK;
14140 }
14141 tcg_gen_insn_start(pc_arg, 0, 0);
14142 dc->insn_start = tcg_last_op();
14143 }
14144
14145 static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
14146 {
14147 DisasContext *s = container_of(dcbase, DisasContext, base);
14148 CPUARMState *env = cpu->env_ptr;
14149 uint64_t pc = s->base.pc_next;
14150 uint32_t insn;
14151
14152 /* Singlestep exceptions have the highest priority. */
14153 if (s->ss_active && !s->pstate_ss) {
14154 /* Singlestep state is Active-pending.
14155 * If we're in this state at the start of a TB then either
14156 * a) we just took an exception to an EL which is being debugged
14157 * and this is the first insn in the exception handler
14158 * b) debug exceptions were masked and we just unmasked them
14159 * without changing EL (e.g. by clearing PSTATE.D)
14160 * In either case we're going to take a swstep exception in the
14161 * "did not step an insn" case, and so the syndrome ISV and EX
14162 * bits should be zero.
14163 */
14164 assert(s->base.num_insns == 1);
14165 gen_swstep_exception(s, 0, 0);
14166 s->base.is_jmp = DISAS_NORETURN;
14167 s->base.pc_next = pc + 4;
14168 return;
14169 }
14170
14171 if (pc & 3) {
14172 /*
14173 * PC alignment fault. This has priority over the instruction abort
14174 * that we would receive from a translation fault via arm_ldl_code.
14175 * This should only be possible after an indirect branch, at the
14176 * start of the TB.
14177 */
14178 assert(s->base.num_insns == 1);
14179 gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
14180 s->base.is_jmp = DISAS_NORETURN;
14181 s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
14182 return;
14183 }
14184
14185 s->pc_curr = pc;
14186 insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
14187 s->insn = insn;
14188 s->base.pc_next = pc + 4;
14189
14190 s->fp_access_checked = false;
14191 s->sve_access_checked = false;
14192
14193 if (s->pstate_il) {
14194 /*
14195 * Illegal execution state. This has priority over BTI
14196 * exceptions, but comes after instruction abort exceptions.
14197 */
14198 gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
14199 return;
14200 }
14201
14202 if (dc_isar_feature(aa64_bti, s)) {
14203 if (s->base.num_insns == 1) {
14204 /*
14205 * At the first insn of the TB, compute s->guarded_page.
14206 * We delayed computing this until successfully reading
14207 * the first insn of the TB, above. This (mostly) ensures
14208 * that the softmmu tlb entry has been populated, and the
14209 * page table GP bit is available.
14210 *
14211 * Note that we need to compute this even if btype == 0,
14212 * because this value is used for BR instructions later
14213 * where ENV is not available.
14214 */
14215 s->guarded_page = is_guarded_page(env, s);
14216
14217 /* First insn can have btype set to non-zero. */
14218 tcg_debug_assert(s->btype >= 0);
14219
14220 /*
14221 * Note that the Branch Target Exception has fairly high
14222 * priority -- below debugging exceptions but above most
14223 * everything else. This allows us to handle this now
14224 * instead of waiting until the insn is otherwise decoded.
14225 */
14226 if (s->btype != 0
14227 && s->guarded_page
14228 && !btype_destination_ok(insn, s->bt, s->btype)) {
14229 gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype));
14230 return;
14231 }
14232 } else {
14233 /* Not the first insn: btype must be 0. */
14234 tcg_debug_assert(s->btype == 0);
14235 }
14236 }
14237
14238 s->is_nonstreaming = false;
14239 if (s->sme_trap_nonstreaming) {
14240 disas_sme_fa64(s, insn);
14241 }
14242
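/*
 * Try the decodetree-generated A64, SME and SVE decoders first;
 * anything they do not match falls through to the legacy
 * hand-written decoder.
 */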
14243 if (!disas_a64(s, insn) &&
14244 !disas_sme(s, insn) &&
14245 !disas_sve(s, insn)) {
14246 disas_a64_legacy(s, insn);
14247 }
14248
14249 /*
14250 * After execution of most insns, btype is reset to 0.
14251 * Note that we set btype == -1 when the insn sets btype.
14252 */
14253 if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
14254 reset_btype(s);
14255 }
14256 }
14257
14258 static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
14259 {
14260 DisasContext *dc = container_of(dcbase, DisasContext, base);
14261
14262 if (unlikely(dc->ss_active)) {
14263 /* Note that this means single stepping WFI doesn't halt the CPU.
14264 * For conditional branch insns this is harmless unreachable code as
14265 * gen_goto_tb() has already handled emitting the debug exception
14266 * (and thus a tb-jump is not possible when singlestepping).
14267 */
14268 switch (dc->base.is_jmp) {
14269 default:
14270 gen_a64_update_pc(dc, 4);
14271 /* fall through */
14272 case DISAS_EXIT:
14273 case DISAS_JUMP:
14274 gen_step_complete_exception(dc);
14275 break;
14276 case DISAS_NORETURN:
14277 break;
14278 }
14279 } else {
14280 switch (dc->base.is_jmp) {
14281 case DISAS_NEXT:
14282 case DISAS_TOO_MANY:
14283 gen_goto_tb(dc, 1, 4);
14284 break;
14285 default:
14286 case DISAS_UPDATE_EXIT:
14287 gen_a64_update_pc(dc, 4);
14288 /* fall through */
14289 case DISAS_EXIT:
14290 tcg_gen_exit_tb(NULL, 0);
14291 break;
14292 case DISAS_UPDATE_NOCHAIN:
14293 gen_a64_update_pc(dc, 4);
14294 /* fall through */
14295 case DISAS_JUMP:
14296 tcg_gen_lookup_and_goto_ptr();
14297 break;
14298 case DISAS_NORETURN:
14299 case DISAS_SWI:
14300 break;
14301 case DISAS_WFE:
14302 gen_a64_update_pc(dc, 4);
14303 gen_helper_wfe(cpu_env);
14304 break;
14305 case DISAS_YIELD:
14306 gen_a64_update_pc(dc, 4);
14307 gen_helper_yield(cpu_env);
14308 break;
14309 case DISAS_WFI:
14310 /*
14311 * This is a special case because we don't want to just halt
14312 * the CPU if trying to debug across a WFI.
14313 */
14314 gen_a64_update_pc(dc, 4);
14315 gen_helper_wfi(cpu_env, tcg_constant_i32(4));
14316 /*
14317 * The helper doesn't necessarily throw an exception, but we
14318 * must go back to the main loop to check for interrupts anyway.
14319 */
14320 tcg_gen_exit_tb(NULL, 0);
14321 break;
14322 }
14323 }
14324 }
14325
14326 static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
14327 CPUState *cpu, FILE *logfile)
14328 {
14329 DisasContext *dc = container_of(dcbase, DisasContext, base);
14330
14331 fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
14332 target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
14333 }
14334
14335 const TranslatorOps aarch64_translator_ops = {
14336 .init_disas_context = aarch64_tr_init_disas_context,
14337 .tb_start = aarch64_tr_tb_start,
14338 .insn_start = aarch64_tr_insn_start,
14339 .translate_insn = aarch64_tr_translate_insn,
14340 .tb_stop = aarch64_tr_tb_stop,
14341 .disas_log = aarch64_tr_disas_log,
14342 };