/*
 * target/arm/translate.c (QEMU, mirror_qemu.git), blame snapshot at commit
 * "target/arm: Convert Signed multiply, signed and unsigned divide"
 */
/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"
#include "hw/semihosting/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
/* Function prototypes for gen_ functions for fix point conversions */
typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}

static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

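/*
 * Note that on AArch32 the program-visible PC runs ahead of the
 * instruction being executed: pc_curr + 8 in ARM mode, pc_curr + 4 in
 * Thumb mode, mirroring the classic ARM pipeline.
 */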
/* The architectural value of PC.  */
static uint32_t read_pc(DisasContext *s)
{
    return s->pc_curr + (s->thumb ? 4 : 8);
}

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        tcg_gen_movi_i32(var, read_pc(s));
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/*
 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
 * This is used for load/store for which use of PC implies (literal),
 * or ADD that implies ADR.
 */
static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    if (reg == 15) {
        tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
    } else {
        tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
    }
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

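/*
 * 16x16->32 signed dual multiply, as used by the SMUAD family of
 * insns: leaves the product of the low halves in a and the product of
 * the high halves in b.
 */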
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(dest, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(dest, var);
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

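/* As above, but signed: 32x32->64 multiply with inputs marked dead. */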
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    tcg_gen_rotri_i32(var, var, 16);
}

/* Dual 16-bit add.  Result placed in dest:
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    dest = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

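/*
 * The ARM carry flag doubles as NOT(borrow) for subtraction, so
 * subtract-with-carry is T0 - T1 - NOT(CF), i.e. T0 - T1 + CF - 1.
 */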
/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

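/*
 * Two strategies for add-with-carry below: if the TCG backend has a
 * native 32-bit add-with-carry pair (add2), chain two adds through CF;
 * otherwise do the sum in 64 bits and split it back into NF (low half)
 * and CF (high half).
 */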
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

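/*
 * Variable shifts: the architecture uses only the bottom byte of the
 * shift register.  For LSL/LSR a count above 31 yields zero (the
 * movcond selects 0 in that case); for ASR the count is clamped to 31
 * so the sign bit fills the result.
 */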
#define GEN_SHIFT(name)                                           \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)  \
{                                                                 \
    TCGv_i32 tmp1, tmp2, tmp3;                                    \
    tmp1 = tcg_temp_new_i32();                                    \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                             \
    tmp2 = tcg_const_i32(0);                                      \
    tmp3 = tcg_const_i32(0x1f);                                   \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
    tcg_temp_free_i32(tmp3);                                      \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                           \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                       \
    tcg_temp_free_i32(tmp2);                                      \
    tcg_temp_free_i32(tmp1);                                      \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    tcg_gen_extract_i32(cpu_CF, var, shift, 1);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    tcg_temp_free_i32(shift);
}

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and AArch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}

static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/*
 * Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 * The Security Extension also requires us to check for the FNC_RETURN
 * which signals a function return from non-secure state; this can happen
 * in both Handler and Thread mode.
 * To avoid having to do multiple comparisons in inline generated code,
 * we make the check we do here loose, so it will match for EXC_RETURN
 * in Thread mode. For system emulation do_v7m_exception_exit() checks
 * for these spurious cases and returns without doing anything (giving
 * the same behaviour as for a branch to a non-magic address).
 *
 * In linux-user mode it is unclear what the right behaviour for an
 * attempted FNC_RETURN should be, because in real hardware this will go
 * directly to Secure code (ie not the Linux kernel) which will then treat
 * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
 * attempt behave the way it would on a CPU without the security extension,
 * which is to say "like a normal branch". That means we can simply treat
 * all branches as normal with no magic address behaviour.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
#ifndef CONFIG_USER_ONLY
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
#endif
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->base.pc_next);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}

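/*
 * In BE32 mode (SCTLR.B set, system emulation only) the address XOR in
 * gen_aa32_addr converts a byte-invariant big-endian sub-word access
 * into the matching little-endian access within the aligned word.
 */
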
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

#define DO_GEN_LD(SUFF, OPC)                                            \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,     \
                                     TCGv_i32 a32, int index)           \
{                                                                       \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);              \
}                                                                       \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,             \
                                           TCGv_i32 val,                \
                                           TCGv_i32 a32, int index,     \
                                           ISSInfo issinfo)             \
{                                                                       \
    gen_aa32_ld##SUFF(s, val, a32, index);                              \
    disas_set_da_iss(s, OPC, issinfo);                                  \
}

#define DO_GEN_ST(SUFF, OPC)                                            \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,     \
                                     TCGv_i32 a32, int index)           \
{                                                                       \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);              \
}                                                                       \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,             \
                                           TCGv_i32 val,                \
                                           TCGv_i32 a32, int index,     \
                                           ISSInfo issinfo)             \
{                                                                       \
    gen_aa32_st##SUFF(s, val, a32, index);                              \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                     \
}

static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}

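/*
 * Instantiate the helpers: each DO_GEN_LD/DO_GEN_ST expands to a plain
 * accessor (e.g. gen_aa32_ld8s) plus an _iss variant that additionally
 * records the data-abort syndrome via disas_set_da_iss.
 */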
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc_curr);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->base.pc_next);
    s->base.is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc_curr);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->base.pc_next);
    s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, pc);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

static void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
        return;
    }

    unallocated_encoding(s);
}

static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

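/*
 * Byte offset within CPUARMState of VFP register REG: D register d<n>
 * maps to vfp.zregs[n / 2].d[n % 2]; S register s<n> maps to the
 * corresponding word of that double, with CPU_DoubleU selecting the
 * host-endian-correct upper or lower half.
 */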
static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, MemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}

static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, cpu_env, offset);
        break;
    case MO_Q:
        tcg_gen_ld_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static void neon_store_element(int reg, int ele, MemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, cpu_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static inline void neon_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}

#define ARM_CP_RW_BIT   (1 << 20)

/* Include the VFP decoder */
#include "translate-vfp.inc.c"

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

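/*
 * Record register updates in wCon: set_mup ORs in bit 1 (data register
 * modified), set_cup bit 0 (control register modified).
 */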
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

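/*
 * Decode the addressing mode of an iwMMXt load/store: compute the
 * effective address into dest, applying pre- or post-indexed writeback
 * to the base register as encoded.  Returns nonzero for an invalid
 * addressing form.
 */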
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                         /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                            /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                          /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {                     /* WLDRD */
                        gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else {                                    /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {                     /* WLDRH */
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                    } else {                                    /* WLDRB */
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                          /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {                     /* WSTRD */
                        gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
                    } else {                                    /* WSTRW wRd */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) {                     /* WSTRH */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                    } else {                                    /* WSTRB */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                                 /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                                 /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                                 /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                                 /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                                 /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                                 /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                                     /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:             /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:             /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
1791 rd0 = (insn >> 16) & 0xf;
1792 rd1 = (insn >> 0) & 0xf;
1793 gen_op_iwmmxt_movq_M0_wRn(rd0);
1794 switch ((insn >> 22) & 3) {
1795 case 0:
1796 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1797 break;
1798 case 1:
1799 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1800 break;
1801 case 2:
1802 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1803 break;
1804 case 3:
1805 return 1;
1806 }
1807 gen_op_iwmmxt_movq_wRn_M0(wrd);
1808 gen_op_iwmmxt_set_mup();
1809 gen_op_iwmmxt_set_cup();
1810 break;
d00584b7 1811 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
18c9b560
AZ
1812 wrd = (insn >> 12) & 0xf;
1813 rd0 = (insn >> 16) & 0xf;
1814 rd1 = (insn >> 0) & 0xf;
1815 gen_op_iwmmxt_movq_M0_wRn(rd0);
1816 if (insn & (1 << 22))
1817 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1818 else
1819 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1820 if (!(insn & (1 << 20)))
1821 gen_op_iwmmxt_addl_M0_wRn(wrd);
1822 gen_op_iwmmxt_movq_wRn_M0(wrd);
1823 gen_op_iwmmxt_set_mup();
1824 break;
d00584b7 1825 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
18c9b560
AZ
1826 wrd = (insn >> 12) & 0xf;
1827 rd0 = (insn >> 16) & 0xf;
1828 rd1 = (insn >> 0) & 0xf;
1829 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1830 if (insn & (1 << 21)) {
1831 if (insn & (1 << 20))
1832 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1833 else
1834 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1835 } else {
1836 if (insn & (1 << 20))
1837 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1838 else
1839 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1840 }
18c9b560
AZ
1841 gen_op_iwmmxt_movq_wRn_M0(wrd);
1842 gen_op_iwmmxt_set_mup();
1843 break;
d00584b7 1844 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
18c9b560
AZ
1845 wrd = (insn >> 12) & 0xf;
1846 rd0 = (insn >> 16) & 0xf;
1847 rd1 = (insn >> 0) & 0xf;
1848 gen_op_iwmmxt_movq_M0_wRn(rd0);
1849 if (insn & (1 << 21))
1850 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1851 else
1852 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1853 if (!(insn & (1 << 20))) {
e677137d
PB
1854 iwmmxt_load_reg(cpu_V1, wrd);
1855 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1856 }
1857 gen_op_iwmmxt_movq_wRn_M0(wrd);
1858 gen_op_iwmmxt_set_mup();
1859 break;
d00584b7 1860 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
18c9b560
AZ
1861 wrd = (insn >> 12) & 0xf;
1862 rd0 = (insn >> 16) & 0xf;
1863 rd1 = (insn >> 0) & 0xf;
1864 gen_op_iwmmxt_movq_M0_wRn(rd0);
1865 switch ((insn >> 22) & 3) {
1866 case 0:
1867 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1868 break;
1869 case 1:
1870 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1871 break;
1872 case 2:
1873 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1874 break;
1875 case 3:
1876 return 1;
1877 }
1878 gen_op_iwmmxt_movq_wRn_M0(wrd);
1879 gen_op_iwmmxt_set_mup();
1880 gen_op_iwmmxt_set_cup();
1881 break;
d00584b7 1882 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
18c9b560
AZ
1883 wrd = (insn >> 12) & 0xf;
1884 rd0 = (insn >> 16) & 0xf;
1885 rd1 = (insn >> 0) & 0xf;
1886 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1887 if (insn & (1 << 22)) {
1888 if (insn & (1 << 20))
1889 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1890 else
1891 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1892 } else {
1893 if (insn & (1 << 20))
1894 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1895 else
1896 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1897 }
18c9b560
AZ
1898 gen_op_iwmmxt_movq_wRn_M0(wrd);
1899 gen_op_iwmmxt_set_mup();
1900 gen_op_iwmmxt_set_cup();
1901 break;
d00584b7 1902 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
18c9b560
AZ
1903 wrd = (insn >> 12) & 0xf;
1904 rd0 = (insn >> 16) & 0xf;
1905 rd1 = (insn >> 0) & 0xf;
1906 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1907 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1908 tcg_gen_andi_i32(tmp, tmp, 7);
1909 iwmmxt_load_reg(cpu_V1, rd1);
1910 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1911 tcg_temp_free_i32(tmp);
18c9b560
AZ
1912 gen_op_iwmmxt_movq_wRn_M0(wrd);
1913 gen_op_iwmmxt_set_mup();
1914 break;
d00584b7 1915 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1916 if (((insn >> 6) & 3) == 3)
1917 return 1;
18c9b560
AZ
1918 rd = (insn >> 12) & 0xf;
1919 wrd = (insn >> 16) & 0xf;
da6b5335 1920 tmp = load_reg(s, rd);
18c9b560
AZ
1921 gen_op_iwmmxt_movq_M0_wRn(wrd);
1922 switch ((insn >> 6) & 3) {
1923 case 0:
da6b5335
FN
1924 tmp2 = tcg_const_i32(0xff);
1925 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1926 break;
1927 case 1:
da6b5335
FN
1928 tmp2 = tcg_const_i32(0xffff);
1929 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1930 break;
1931 case 2:
da6b5335
FN
1932 tmp2 = tcg_const_i32(0xffffffff);
1933 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1934 break;
da6b5335 1935 default:
f764718d
RH
1936 tmp2 = NULL;
1937 tmp3 = NULL;
18c9b560 1938 }
da6b5335 1939 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1940 tcg_temp_free_i32(tmp3);
1941 tcg_temp_free_i32(tmp2);
7d1b0095 1942 tcg_temp_free_i32(tmp);
18c9b560
AZ
1943 gen_op_iwmmxt_movq_wRn_M0(wrd);
1944 gen_op_iwmmxt_set_mup();
1945 break;
d00584b7 1946 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
18c9b560
AZ
1947 rd = (insn >> 12) & 0xf;
1948 wrd = (insn >> 16) & 0xf;
da6b5335 1949 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1950 return 1;
1951 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1952 tmp = tcg_temp_new_i32();
18c9b560
AZ
1953 switch ((insn >> 22) & 3) {
1954 case 0:
da6b5335 1955 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 1956 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
1957 if (insn & 8) {
1958 tcg_gen_ext8s_i32(tmp, tmp);
1959 } else {
1960 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1961 }
1962 break;
1963 case 1:
da6b5335 1964 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 1965 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
1966 if (insn & 8) {
1967 tcg_gen_ext16s_i32(tmp, tmp);
1968 } else {
1969 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1970 }
1971 break;
1972 case 2:
da6b5335 1973 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 1974 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 1975 break;
18c9b560 1976 }
da6b5335 1977 store_reg(s, rd, tmp);
18c9b560 1978 break;
d00584b7 1979 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1980 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1981 return 1;
da6b5335 1982 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1983 switch ((insn >> 22) & 3) {
1984 case 0:
da6b5335 1985 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1986 break;
1987 case 1:
da6b5335 1988 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1989 break;
1990 case 2:
da6b5335 1991 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1992 break;
18c9b560 1993 }
da6b5335
FN
1994 tcg_gen_shli_i32(tmp, tmp, 28);
1995 gen_set_nzcv(tmp);
7d1b0095 1996 tcg_temp_free_i32(tmp);
18c9b560 1997 break;
d00584b7 1998 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1999 if (((insn >> 6) & 3) == 3)
2000 return 1;
18c9b560
AZ
2001 rd = (insn >> 12) & 0xf;
2002 wrd = (insn >> 16) & 0xf;
da6b5335 2003 tmp = load_reg(s, rd);
18c9b560
AZ
2004 switch ((insn >> 6) & 3) {
2005 case 0:
da6b5335 2006 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2007 break;
2008 case 1:
da6b5335 2009 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2010 break;
2011 case 2:
da6b5335 2012 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2013 break;
18c9b560 2014 }
7d1b0095 2015 tcg_temp_free_i32(tmp);
18c9b560
AZ
2016 gen_op_iwmmxt_movq_wRn_M0(wrd);
2017 gen_op_iwmmxt_set_mup();
2018 break;
d00584b7 2019 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2020 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2021 return 1;
da6b5335 2022 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2023 tmp2 = tcg_temp_new_i32();
da6b5335 2024 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2025 switch ((insn >> 22) & 3) {
2026 case 0:
2027 for (i = 0; i < 7; i++) {
da6b5335
FN
2028 tcg_gen_shli_i32(tmp2, tmp2, 4);
2029 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2030 }
2031 break;
2032 case 1:
2033 for (i = 0; i < 3; i++) {
da6b5335
FN
2034 tcg_gen_shli_i32(tmp2, tmp2, 8);
2035 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2036 }
2037 break;
2038 case 2:
da6b5335
FN
2039 tcg_gen_shli_i32(tmp2, tmp2, 16);
2040 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2041 break;
18c9b560 2042 }
da6b5335 2043 gen_set_nzcv(tmp);
7d1b0095
PM
2044 tcg_temp_free_i32(tmp2);
2045 tcg_temp_free_i32(tmp);
18c9b560 2046 break;
d00584b7 2047 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
18c9b560
AZ
2048 wrd = (insn >> 12) & 0xf;
2049 rd0 = (insn >> 16) & 0xf;
2050 gen_op_iwmmxt_movq_M0_wRn(rd0);
2051 switch ((insn >> 22) & 3) {
2052 case 0:
e677137d 2053 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2054 break;
2055 case 1:
e677137d 2056 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2057 break;
2058 case 2:
e677137d 2059 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2060 break;
2061 case 3:
2062 return 1;
2063 }
2064 gen_op_iwmmxt_movq_wRn_M0(wrd);
2065 gen_op_iwmmxt_set_mup();
2066 break;
d00584b7 2067 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2068 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2069 return 1;
da6b5335 2070 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2071 tmp2 = tcg_temp_new_i32();
da6b5335 2072 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2073 switch ((insn >> 22) & 3) {
2074 case 0:
2075 for (i = 0; i < 7; i++) {
da6b5335
FN
2076 tcg_gen_shli_i32(tmp2, tmp2, 4);
2077 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2078 }
2079 break;
2080 case 1:
2081 for (i = 0; i < 3; i++) {
da6b5335
FN
2082 tcg_gen_shli_i32(tmp2, tmp2, 8);
2083 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2084 }
2085 break;
2086 case 2:
da6b5335
FN
2087 tcg_gen_shli_i32(tmp2, tmp2, 16);
2088 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2089 break;
18c9b560 2090 }
da6b5335 2091 gen_set_nzcv(tmp);
7d1b0095
PM
2092 tcg_temp_free_i32(tmp2);
2093 tcg_temp_free_i32(tmp);
18c9b560 2094 break;
d00584b7 2095 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
18c9b560
AZ
2096 rd = (insn >> 12) & 0xf;
2097 rd0 = (insn >> 16) & 0xf;
da6b5335 2098 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2099 return 1;
2100 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2101 tmp = tcg_temp_new_i32();
18c9b560
AZ
2102 switch ((insn >> 22) & 3) {
2103 case 0:
da6b5335 2104 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2105 break;
2106 case 1:
da6b5335 2107 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2108 break;
2109 case 2:
da6b5335 2110 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2111 break;
18c9b560 2112 }
da6b5335 2113 store_reg(s, rd, tmp);
18c9b560 2114 break;
d00584b7 2115 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
18c9b560
AZ
2116 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2117 wrd = (insn >> 12) & 0xf;
2118 rd0 = (insn >> 16) & 0xf;
2119 rd1 = (insn >> 0) & 0xf;
2120 gen_op_iwmmxt_movq_M0_wRn(rd0);
2121 switch ((insn >> 22) & 3) {
2122 case 0:
2123 if (insn & (1 << 21))
2124 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2125 else
2126 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2127 break;
2128 case 1:
2129 if (insn & (1 << 21))
2130 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2131 else
2132 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2133 break;
2134 case 2:
2135 if (insn & (1 << 21))
2136 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2137 else
2138 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2139 break;
2140 case 3:
2141 return 1;
2142 }
2143 gen_op_iwmmxt_movq_wRn_M0(wrd);
2144 gen_op_iwmmxt_set_mup();
2145 gen_op_iwmmxt_set_cup();
2146 break;
d00584b7 2147 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
18c9b560
AZ
2148 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2149 wrd = (insn >> 12) & 0xf;
2150 rd0 = (insn >> 16) & 0xf;
2151 gen_op_iwmmxt_movq_M0_wRn(rd0);
2152 switch ((insn >> 22) & 3) {
2153 case 0:
2154 if (insn & (1 << 21))
2155 gen_op_iwmmxt_unpacklsb_M0();
2156 else
2157 gen_op_iwmmxt_unpacklub_M0();
2158 break;
2159 case 1:
2160 if (insn & (1 << 21))
2161 gen_op_iwmmxt_unpacklsw_M0();
2162 else
2163 gen_op_iwmmxt_unpackluw_M0();
2164 break;
2165 case 2:
2166 if (insn & (1 << 21))
2167 gen_op_iwmmxt_unpacklsl_M0();
2168 else
2169 gen_op_iwmmxt_unpacklul_M0();
2170 break;
2171 case 3:
2172 return 1;
2173 }
2174 gen_op_iwmmxt_movq_wRn_M0(wrd);
2175 gen_op_iwmmxt_set_mup();
2176 gen_op_iwmmxt_set_cup();
2177 break;
d00584b7 2178 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
18c9b560
AZ
2179 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2180 wrd = (insn >> 12) & 0xf;
2181 rd0 = (insn >> 16) & 0xf;
2182 gen_op_iwmmxt_movq_M0_wRn(rd0);
2183 switch ((insn >> 22) & 3) {
2184 case 0:
2185 if (insn & (1 << 21))
2186 gen_op_iwmmxt_unpackhsb_M0();
2187 else
2188 gen_op_iwmmxt_unpackhub_M0();
2189 break;
2190 case 1:
2191 if (insn & (1 << 21))
2192 gen_op_iwmmxt_unpackhsw_M0();
2193 else
2194 gen_op_iwmmxt_unpackhuw_M0();
2195 break;
2196 case 2:
2197 if (insn & (1 << 21))
2198 gen_op_iwmmxt_unpackhsl_M0();
2199 else
2200 gen_op_iwmmxt_unpackhul_M0();
2201 break;
2202 case 3:
2203 return 1;
2204 }
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 gen_op_iwmmxt_set_cup();
2208 break;
d00584b7 2209 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
18c9b560 2210 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2211 if (((insn >> 22) & 3) == 0)
2212 return 1;
18c9b560
AZ
2213 wrd = (insn >> 12) & 0xf;
2214 rd0 = (insn >> 16) & 0xf;
2215 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2216 tmp = tcg_temp_new_i32();
da6b5335 2217 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2218 tcg_temp_free_i32(tmp);
18c9b560 2219 return 1;
da6b5335 2220 }
18c9b560 2221 switch ((insn >> 22) & 3) {
18c9b560 2222 case 1:
477955bd 2223 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2224 break;
2225 case 2:
477955bd 2226 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2227 break;
2228 case 3:
477955bd 2229 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2230 break;
2231 }
7d1b0095 2232 tcg_temp_free_i32(tmp);
18c9b560
AZ
2233 gen_op_iwmmxt_movq_wRn_M0(wrd);
2234 gen_op_iwmmxt_set_mup();
2235 gen_op_iwmmxt_set_cup();
2236 break;
d00584b7 2237 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
18c9b560 2238 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2239 if (((insn >> 22) & 3) == 0)
2240 return 1;
18c9b560
AZ
2241 wrd = (insn >> 12) & 0xf;
2242 rd0 = (insn >> 16) & 0xf;
2243 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2244 tmp = tcg_temp_new_i32();
da6b5335 2245 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2246 tcg_temp_free_i32(tmp);
18c9b560 2247 return 1;
da6b5335 2248 }
18c9b560 2249 switch ((insn >> 22) & 3) {
18c9b560 2250 case 1:
477955bd 2251 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2252 break;
2253 case 2:
477955bd 2254 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2255 break;
2256 case 3:
477955bd 2257 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2258 break;
2259 }
7d1b0095 2260 tcg_temp_free_i32(tmp);
18c9b560
AZ
2261 gen_op_iwmmxt_movq_wRn_M0(wrd);
2262 gen_op_iwmmxt_set_mup();
2263 gen_op_iwmmxt_set_cup();
2264 break;
d00584b7 2265 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
18c9b560 2266 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2267 if (((insn >> 22) & 3) == 0)
2268 return 1;
18c9b560
AZ
2269 wrd = (insn >> 12) & 0xf;
2270 rd0 = (insn >> 16) & 0xf;
2271 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2272 tmp = tcg_temp_new_i32();
da6b5335 2273 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2274 tcg_temp_free_i32(tmp);
18c9b560 2275 return 1;
da6b5335 2276 }
18c9b560 2277 switch ((insn >> 22) & 3) {
18c9b560 2278 case 1:
477955bd 2279 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2280 break;
2281 case 2:
477955bd 2282 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2283 break;
2284 case 3:
477955bd 2285 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2286 break;
2287 }
7d1b0095 2288 tcg_temp_free_i32(tmp);
18c9b560
AZ
2289 gen_op_iwmmxt_movq_wRn_M0(wrd);
2290 gen_op_iwmmxt_set_mup();
2291 gen_op_iwmmxt_set_cup();
2292 break;
d00584b7 2293 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
18c9b560 2294 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2295 if (((insn >> 22) & 3) == 0)
2296 return 1;
18c9b560
AZ
2297 wrd = (insn >> 12) & 0xf;
2298 rd0 = (insn >> 16) & 0xf;
2299 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2300 tmp = tcg_temp_new_i32();
18c9b560 2301 switch ((insn >> 22) & 3) {
18c9b560 2302 case 1:
da6b5335 2303 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2304 tcg_temp_free_i32(tmp);
18c9b560 2305 return 1;
da6b5335 2306 }
477955bd 2307 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2308 break;
2309 case 2:
da6b5335 2310 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2311 tcg_temp_free_i32(tmp);
18c9b560 2312 return 1;
da6b5335 2313 }
477955bd 2314 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2315 break;
2316 case 3:
da6b5335 2317 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2318 tcg_temp_free_i32(tmp);
18c9b560 2319 return 1;
da6b5335 2320 }
477955bd 2321 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2322 break;
2323 }
7d1b0095 2324 tcg_temp_free_i32(tmp);
18c9b560
AZ
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 gen_op_iwmmxt_set_cup();
2328 break;
d00584b7 2329 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
18c9b560
AZ
2330 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2331 wrd = (insn >> 12) & 0xf;
2332 rd0 = (insn >> 16) & 0xf;
2333 rd1 = (insn >> 0) & 0xf;
2334 gen_op_iwmmxt_movq_M0_wRn(rd0);
2335 switch ((insn >> 22) & 3) {
2336 case 0:
2337 if (insn & (1 << 21))
2338 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2339 else
2340 gen_op_iwmmxt_minub_M0_wRn(rd1);
2341 break;
2342 case 1:
2343 if (insn & (1 << 21))
2344 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2345 else
2346 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2347 break;
2348 case 2:
2349 if (insn & (1 << 21))
2350 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2351 else
2352 gen_op_iwmmxt_minul_M0_wRn(rd1);
2353 break;
2354 case 3:
2355 return 1;
2356 }
2357 gen_op_iwmmxt_movq_wRn_M0(wrd);
2358 gen_op_iwmmxt_set_mup();
2359 break;
d00584b7 2360 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
18c9b560
AZ
2361 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2362 wrd = (insn >> 12) & 0xf;
2363 rd0 = (insn >> 16) & 0xf;
2364 rd1 = (insn >> 0) & 0xf;
2365 gen_op_iwmmxt_movq_M0_wRn(rd0);
2366 switch ((insn >> 22) & 3) {
2367 case 0:
2368 if (insn & (1 << 21))
2369 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2370 else
2371 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2372 break;
2373 case 1:
2374 if (insn & (1 << 21))
2375 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2376 else
2377 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2378 break;
2379 case 2:
2380 if (insn & (1 << 21))
2381 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2382 else
2383 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2384 break;
2385 case 3:
2386 return 1;
2387 }
2388 gen_op_iwmmxt_movq_wRn_M0(wrd);
2389 gen_op_iwmmxt_set_mup();
2390 break;
d00584b7 2391 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
18c9b560
AZ
2392 case 0x402: case 0x502: case 0x602: case 0x702:
2393 wrd = (insn >> 12) & 0xf;
2394 rd0 = (insn >> 16) & 0xf;
2395 rd1 = (insn >> 0) & 0xf;
2396 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2397 tmp = tcg_const_i32((insn >> 20) & 3);
2398 iwmmxt_load_reg(cpu_V1, rd1);
2399 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2400 tcg_temp_free_i32(tmp);
18c9b560
AZ
2401 gen_op_iwmmxt_movq_wRn_M0(wrd);
2402 gen_op_iwmmxt_set_mup();
2403 break;
d00584b7 2404 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
18c9b560
AZ
2405 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2406 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2407 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2408 wrd = (insn >> 12) & 0xf;
2409 rd0 = (insn >> 16) & 0xf;
2410 rd1 = (insn >> 0) & 0xf;
2411 gen_op_iwmmxt_movq_M0_wRn(rd0);
2412 switch ((insn >> 20) & 0xf) {
2413 case 0x0:
2414 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2415 break;
2416 case 0x1:
2417 gen_op_iwmmxt_subub_M0_wRn(rd1);
2418 break;
2419 case 0x3:
2420 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2421 break;
2422 case 0x4:
2423 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2424 break;
2425 case 0x5:
2426 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2427 break;
2428 case 0x7:
2429 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2430 break;
2431 case 0x8:
2432 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2433 break;
2434 case 0x9:
2435 gen_op_iwmmxt_subul_M0_wRn(rd1);
2436 break;
2437 case 0xb:
2438 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2439 break;
2440 default:
2441 return 1;
2442 }
2443 gen_op_iwmmxt_movq_wRn_M0(wrd);
2444 gen_op_iwmmxt_set_mup();
2445 gen_op_iwmmxt_set_cup();
2446 break;
d00584b7 2447 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
18c9b560
AZ
2448 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2449 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2450 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2451 wrd = (insn >> 12) & 0xf;
2452 rd0 = (insn >> 16) & 0xf;
2453 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2454 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2455 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2456 tcg_temp_free_i32(tmp);
18c9b560
AZ
2457 gen_op_iwmmxt_movq_wRn_M0(wrd);
2458 gen_op_iwmmxt_set_mup();
2459 gen_op_iwmmxt_set_cup();
2460 break;
d00584b7 2461 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
18c9b560
AZ
2462 case 0x418: case 0x518: case 0x618: case 0x718:
2463 case 0x818: case 0x918: case 0xa18: case 0xb18:
2464 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2465 wrd = (insn >> 12) & 0xf;
2466 rd0 = (insn >> 16) & 0xf;
2467 rd1 = (insn >> 0) & 0xf;
2468 gen_op_iwmmxt_movq_M0_wRn(rd0);
2469 switch ((insn >> 20) & 0xf) {
2470 case 0x0:
2471 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2472 break;
2473 case 0x1:
2474 gen_op_iwmmxt_addub_M0_wRn(rd1);
2475 break;
2476 case 0x3:
2477 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2478 break;
2479 case 0x4:
2480 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2481 break;
2482 case 0x5:
2483 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2484 break;
2485 case 0x7:
2486 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2487 break;
2488 case 0x8:
2489 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2490 break;
2491 case 0x9:
2492 gen_op_iwmmxt_addul_M0_wRn(rd1);
2493 break;
2494 case 0xb:
2495 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2496 break;
2497 default:
2498 return 1;
2499 }
2500 gen_op_iwmmxt_movq_wRn_M0(wrd);
2501 gen_op_iwmmxt_set_mup();
2502 gen_op_iwmmxt_set_cup();
2503 break;
d00584b7 2504 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
18c9b560
AZ
2505 case 0x408: case 0x508: case 0x608: case 0x708:
2506 case 0x808: case 0x908: case 0xa08: case 0xb08:
2507 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2508 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2509 return 1;
18c9b560
AZ
2510 wrd = (insn >> 12) & 0xf;
2511 rd0 = (insn >> 16) & 0xf;
2512 rd1 = (insn >> 0) & 0xf;
2513 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2514 switch ((insn >> 22) & 3) {
18c9b560
AZ
2515 case 1:
2516 if (insn & (1 << 21))
2517 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2518 else
2519 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2520 break;
2521 case 2:
2522 if (insn & (1 << 21))
2523 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2524 else
2525 gen_op_iwmmxt_packul_M0_wRn(rd1);
2526 break;
2527 case 3:
2528 if (insn & (1 << 21))
2529 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2530 else
2531 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2532 break;
2533 }
2534 gen_op_iwmmxt_movq_wRn_M0(wrd);
2535 gen_op_iwmmxt_set_mup();
2536 gen_op_iwmmxt_set_cup();
2537 break;
2538 case 0x201: case 0x203: case 0x205: case 0x207:
2539 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2540 case 0x211: case 0x213: case 0x215: case 0x217:
2541 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2542 wrd = (insn >> 5) & 0xf;
2543 rd0 = (insn >> 12) & 0xf;
2544 rd1 = (insn >> 0) & 0xf;
2545 if (rd0 == 0xf || rd1 == 0xf)
2546 return 1;
2547 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2548 tmp = load_reg(s, rd0);
2549 tmp2 = load_reg(s, rd1);
18c9b560 2550 switch ((insn >> 16) & 0xf) {
d00584b7 2551 case 0x0: /* TMIA */
da6b5335 2552 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2553 break;
d00584b7 2554 case 0x8: /* TMIAPH */
da6b5335 2555 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2556 break;
d00584b7 2557 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2558 if (insn & (1 << 16))
da6b5335 2559 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2560 if (insn & (1 << 17))
da6b5335
FN
2561 tcg_gen_shri_i32(tmp2, tmp2, 16);
2562 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2563 break;
2564 default:
7d1b0095
PM
2565 tcg_temp_free_i32(tmp2);
2566 tcg_temp_free_i32(tmp);
18c9b560
AZ
2567 return 1;
2568 }
7d1b0095
PM
2569 tcg_temp_free_i32(tmp2);
2570 tcg_temp_free_i32(tmp);
18c9b560
AZ
2571 gen_op_iwmmxt_movq_wRn_M0(wrd);
2572 gen_op_iwmmxt_set_mup();
2573 break;
2574 default:
2575 return 1;
2576 }
2577
2578 return 0;
2579}
2580
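/*
 * A minimal sketch of how the main decoder is expected to consume the
 * return value (assuming the usual illegal_op label in the caller):
 *
 *     if (disas_iwmmxt_insn(s, insn)) {
 *         goto illegal_op;
 *     }
 */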
a1c7273b 2581/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2582 (i.e. an undefined instruction). */
7dcc1f89 2583static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2584{
2585 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2586 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2587
2588 if ((insn & 0x0ff00f10) == 0x0e200010) {
2589 /* Multiply with Internal Accumulate Format */
2590 rd0 = (insn >> 12) & 0xf;
2591 rd1 = insn & 0xf;
2592 acc = (insn >> 5) & 7;
2593
2594 if (acc != 0)
2595 return 1;
2596
3a554c0f
FN
2597 tmp = load_reg(s, rd0);
2598 tmp2 = load_reg(s, rd1);
18c9b560 2599 switch ((insn >> 16) & 0xf) {
d00584b7 2600 case 0x0: /* MIA */
3a554c0f 2601 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2602 break;
d00584b7 2603 case 0x8: /* MIAPH */
3a554c0f 2604 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2605 break;
d00584b7
PM
2606 case 0xc: /* MIABB */
2607 case 0xd: /* MIABT */
2608 case 0xe: /* MIATB */
2609 case 0xf: /* MIATT */
18c9b560 2610 if (insn & (1 << 16))
3a554c0f 2611 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2612 if (insn & (1 << 17))
3a554c0f
FN
2613 tcg_gen_shri_i32(tmp2, tmp2, 16);
2614 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2615 break;
2616 default:
2617 return 1;
2618 }
7d1b0095
PM
2619 tcg_temp_free_i32(tmp2);
2620 tcg_temp_free_i32(tmp);
18c9b560
AZ
2621
2622 gen_op_iwmmxt_movq_wRn_M0(acc);
2623 return 0;
2624 }
2625
2626 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2627 /* Internal Accumulator Access Format */
2628 rdhi = (insn >> 16) & 0xf;
2629 rdlo = (insn >> 12) & 0xf;
2630 acc = insn & 7;
2631
2632 if (acc != 0)
2633 return 1;
2634
d00584b7 2635 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2636 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2637 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
664b7e3b 2638 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2639 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
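            /*
             * The XScale internal accumulator is 40 bits wide, so after
             * the high half is extracted into rdhi only bits [39:32]
             * remain valid, hence the 8-bit mask above.
             */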
d00584b7 2640 } else { /* MAR */
3a554c0f
FN
2641 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2642 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2643 }
2644 return 0;
2645 }
2646
2647 return 1;
2648}
2649
9ee6e8bb
PB
2650#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2651#define VFP_SREG(insn, bigbit, smallbit) \
2652 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2653#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
d614a513 2654 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
9ee6e8bb
PB
2655 reg = (((insn) >> (bigbit)) & 0x0f) \
2656 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2657 } else { \
2658 if (insn & (1 << (smallbit))) \
2659 return 1; \
2660 reg = ((insn) >> (bigbit)) & 0x0f; \
2661 }} while (0)
2662
2663#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2664#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2665#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2666#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2667#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2668#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2669
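/*
 * Worked example: VFP_SREG_D yields Sd = (insn[15:12] << 1) | insn[22];
 * VFP_DREG_D on a VFP3 core yields Dd = (insn[22] << 4) | insn[15:12],
 * while pre-VFP3 cores have only 16 D registers and UNDEF when
 * insn[22] is set.
 */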
39d5492a 2670static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2671{
39d5492a 2672 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2673 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2674 tcg_gen_shli_i32(tmp, var, 16);
2675 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2676 tcg_temp_free_i32(tmp);
ad69471c
PB
2677}
2678
39d5492a 2679static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2680{
39d5492a 2681 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2682 tcg_gen_andi_i32(var, var, 0xffff0000);
2683 tcg_gen_shri_i32(tmp, var, 16);
2684 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2685 tcg_temp_free_i32(tmp);
ad69471c
PB
2686}
2687
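/*
 * For example, gen_neon_dup_low16 turns 0x1234abcd into 0xabcdabcd and
 * gen_neon_dup_high16 turns it into 0x12341234.
 */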
06db8196
PM
2688/*
2689 * Disassemble a VFP instruction. Returns nonzero if an error occurred
2690 * (i.e. an undefined instruction).
2691 */
7dcc1f89 2692static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95 2693{
d614a513 2694 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 2695 return 1;
d614a513 2696 }
40f137e1 2697
78e138bc
PM
2698 /*
2699 * If the decodetree decoder handles this insn it will always
2700 * emit code to either execute the insn or generate an appropriate
2701 * exception; so we don't need to ever return non-zero to tell
2702 * the calling code to emit an UNDEF exception.
2703 */
2704 if (extract32(insn, 28, 4) == 0xf) {
2705 if (disas_vfp_uncond(s, insn)) {
2706 return 0;
2707 }
2708 } else {
2709 if (disas_vfp(s, insn)) {
2710 return 0;
2711 }
2712 }
3111bfc2
PM
2713 /* If the decodetree decoder didn't handle this insn, it must be UNDEF */
2714 return 1;
b7bcbe95
FB
2715}
2716
90aa39a1 2717static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 2718{
90aa39a1 2719#ifndef CONFIG_USER_ONLY
dcba3a8d 2720 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
a0415916 2721 ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
90aa39a1
SF
2722#else
2723 return true;
2724#endif
2725}
6e256c93 2726
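/*
 * Direct block chaining is only safe while the destination stays on
 * the same guest page as this TB (or the current instruction), so that
 * invalidating the page also invalidates any TB that jumps into it.
 */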
8a6b28c7
EC
2727static void gen_goto_ptr(void)
2728{
7f11636d 2729 tcg_gen_lookup_and_goto_ptr();
8a6b28c7
EC
2730}
2731
4cae8f56
AB
2732/* This will end the TB but doesn't guarantee we'll return to
2733 * cpu_loop_exec. Any live exit_requests will be processed as we
2734 * enter the next TB.
2735 */
8a6b28c7 2736static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
90aa39a1
SF
2737{
2738 if (use_goto_tb(s, dest)) {
57fec1fe 2739 tcg_gen_goto_tb(n);
eaed129d 2740 gen_set_pc_im(s, dest);
07ea28b4 2741 tcg_gen_exit_tb(s->base.tb, n);
6e256c93 2742 } else {
eaed129d 2743 gen_set_pc_im(s, dest);
8a6b28c7 2744 gen_goto_ptr();
6e256c93 2745 }
dcba3a8d 2746 s->base.is_jmp = DISAS_NORETURN;
c53be334
FB
2747}
2748
8aaca4c0
FB
2749static inline void gen_jmp(DisasContext *s, uint32_t dest)
2750{
b636649f 2751 if (unlikely(is_singlestepping(s))) {
8aaca4c0 2752 /* An indirect jump so that we still trigger the debug exception. */
5899f386 2753 if (s->thumb)
d9ba4830
PB
2754 dest |= 1;
2755 gen_bx_im(s, dest);
8aaca4c0 2756 } else {
6e256c93 2757 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
2758 }
2759}
2760
39d5492a 2761static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 2762{
ee097184 2763 if (x)
d9ba4830 2764 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 2765 else
d9ba4830 2766 gen_sxth(t0);
ee097184 2767 if (y)
d9ba4830 2768 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 2769 else
d9ba4830
PB
2770 gen_sxth(t1);
2771 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
2772}
2773
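/*
 * This implements the 16x16->32 multiply step shared by the SMULxy
 * family: x and y select the top (arithmetic shift by 16) or bottom
 * (sign-extended) halfword of each operand.
 */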
2774/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
2775static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
2776{
b5ff1b31
FB
2777 uint32_t mask;
2778
2779 mask = 0;
2780 if (flags & (1 << 0))
2781 mask |= 0xff;
2782 if (flags & (1 << 1))
2783 mask |= 0xff00;
2784 if (flags & (1 << 2))
2785 mask |= 0xff0000;
2786 if (flags & (1 << 3))
2787 mask |= 0xff000000;
9ee6e8bb 2788
2ae23e75 2789 /* Mask out undefined bits. */
9ee6e8bb 2790 mask &= ~CPSR_RESERVED;
d614a513 2791 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 2792 mask &= ~CPSR_T;
d614a513
PM
2793 }
2794 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 2795 mask &= ~CPSR_Q; /* V5TE in reality */
d614a513
PM
2796 }
2797 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 2798 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
2799 }
2800 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 2801 mask &= ~CPSR_IT;
d614a513 2802 }
4051e12c
PM
2803 /* Mask out execution state and reserved bits. */
2804 if (!spsr) {
2805 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
2806 }
b5ff1b31
FB
2807 /* Mask out privileged bits. */
2808 if (IS_USER(s))
9ee6e8bb 2809 mask &= CPSR_USER;
b5ff1b31
FB
2810 return mask;
2811}
2812
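/*
 * For example, an MSR with field mask 'fs' (flags = 0b1100) starts
 * from 0xffff0000, which the code above then trims according to the
 * architecture version, execution state and privilege level.
 */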
2fbac54b 2813/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 2814static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 2815{
39d5492a 2816 TCGv_i32 tmp;
b5ff1b31
FB
2817 if (spsr) {
2818 /* ??? This is also undefined in system mode. */
2819 if (IS_USER(s))
2820 return 1;
d9ba4830
PB
2821
2822 tmp = load_cpu_field(spsr);
2823 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
2824 tcg_gen_andi_i32(t0, t0, mask);
2825 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 2826 store_cpu_field(tmp, spsr);
b5ff1b31 2827 } else {
2fbac54b 2828 gen_set_cpsr(t0, mask);
b5ff1b31 2829 }
7d1b0095 2830 tcg_temp_free_i32(t0);
b5ff1b31
FB
2831 gen_lookup_tb(s);
2832 return 0;
2833}
2834
2fbac54b
FN
2835/* Returns nonzero if access to the PSR is not permitted. */
2836static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
2837{
39d5492a 2838 TCGv_i32 tmp;
7d1b0095 2839 tmp = tcg_temp_new_i32();
2fbac54b
FN
2840 tcg_gen_movi_i32(tmp, val);
2841 return gen_set_psr(s, mask, spsr, tmp);
2842}
2843
8bfd0550
PM
2844static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
2845 int *tgtmode, int *regno)
2846{
2847 /* Decode the r and sysm fields of MSR/MRS banked accesses into
2848 * the target mode and register number, and identify the various
2849 * unpredictable cases.
2850 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
2851 * + executed in user mode
2852 * + using R15 as the src/dest register
2853 * + accessing an unimplemented register
2854 * + accessing a register that's inaccessible at current PL/security state*
2855 * + accessing a register that you could access with a different insn
2856 * We choose to UNDEF in all these cases.
2857 * Since we don't know which of the various AArch32 modes we are in
2858 * we have to defer some checks to runtime.
2859 * Accesses to Monitor mode registers from Secure EL1 (which implies
2860 * that EL3 is AArch64) must trap to EL3.
2861 *
2862 * If the access checks fail this function will emit code to take
2863 * an exception and return false. Otherwise it will return true,
2864 * and set *tgtmode and *regno appropriately.
2865 */
2866 int exc_target = default_exception_el(s);
2867
2868 /* These instructions are present only in ARMv8, or in ARMv7 with the
2869 * Virtualization Extensions.
2870 */
2871 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
2872 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
2873 goto undef;
2874 }
2875
2876 if (IS_USER(s) || rn == 15) {
2877 goto undef;
2878 }
2879
2880 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
2881 * of registers into (r, sysm).
2882 */
2883 if (r) {
2884 /* SPSRs for other modes */
2885 switch (sysm) {
2886 case 0xe: /* SPSR_fiq */
2887 *tgtmode = ARM_CPU_MODE_FIQ;
2888 break;
2889 case 0x10: /* SPSR_irq */
2890 *tgtmode = ARM_CPU_MODE_IRQ;
2891 break;
2892 case 0x12: /* SPSR_svc */
2893 *tgtmode = ARM_CPU_MODE_SVC;
2894 break;
2895 case 0x14: /* SPSR_abt */
2896 *tgtmode = ARM_CPU_MODE_ABT;
2897 break;
2898 case 0x16: /* SPSR_und */
2899 *tgtmode = ARM_CPU_MODE_UND;
2900 break;
2901 case 0x1c: /* SPSR_mon */
2902 *tgtmode = ARM_CPU_MODE_MON;
2903 break;
2904 case 0x1e: /* SPSR_hyp */
2905 *tgtmode = ARM_CPU_MODE_HYP;
2906 break;
2907 default: /* unallocated */
2908 goto undef;
2909 }
2910 /* We arbitrarily assign SPSR a register number of 16. */
2911 *regno = 16;
2912 } else {
2913 /* general purpose registers for other modes */
2914 switch (sysm) {
2915 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
2916 *tgtmode = ARM_CPU_MODE_USR;
2917 *regno = sysm + 8;
2918 break;
2919 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
2920 *tgtmode = ARM_CPU_MODE_FIQ;
2921 *regno = sysm;
2922 break;
2923 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
2924 *tgtmode = ARM_CPU_MODE_IRQ;
2925 *regno = sysm & 1 ? 13 : 14;
2926 break;
2927 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
2928 *tgtmode = ARM_CPU_MODE_SVC;
2929 *regno = sysm & 1 ? 13 : 14;
2930 break;
2931 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
2932 *tgtmode = ARM_CPU_MODE_ABT;
2933 *regno = sysm & 1 ? 13 : 14;
2934 break;
2935 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
2936 *tgtmode = ARM_CPU_MODE_UND;
2937 *regno = sysm & 1 ? 13 : 14;
2938 break;
2939 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
2940 *tgtmode = ARM_CPU_MODE_MON;
2941 *regno = sysm & 1 ? 13 : 14;
2942 break;
2943 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
2944 *tgtmode = ARM_CPU_MODE_HYP;
2945 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
2946 *regno = sysm & 1 ? 13 : 17;
2947 break;
2948 default: /* unallocated */
2949 goto undef;
2950 }
2951 }
2952
2953 /* Catch the 'accessing inaccessible register' cases we can detect
2954 * at translate time.
2955 */
2956 switch (*tgtmode) {
2957 case ARM_CPU_MODE_MON:
2958 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
2959 goto undef;
2960 }
2961 if (s->current_el == 1) {
2962 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
2963 * then accesses to Mon registers trap to EL3
2964 */
2965 exc_target = 3;
2966 goto undef;
2967 }
2968 break;
2969 case ARM_CPU_MODE_HYP:
aec4dd09
PM
2970 /*
2971 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
2972 * (and so we can forbid accesses from EL2 or below). elr_hyp
2973 * can be accessed also from Hyp mode, so forbid accesses from
2974 * EL0 or EL1.
8bfd0550 2975 */
aec4dd09
PM
2976 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
2977 (s->current_el < 3 && *regno != 17)) {
8bfd0550
PM
2978 goto undef;
2979 }
2980 break;
2981 default:
2982 break;
2983 }
2984
2985 return true;
2986
2987undef:
2988 /* If we get here then some access check did not pass */
a767fac8
RH
2989 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
2990 syn_uncategorized(), exc_target);
8bfd0550
PM
2991 return false;
2992}
2993
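/*
 * For example, MRS r0, r8_fiq encodes r = 0, sysm = 0x8, which the
 * decode above maps to tgtmode = ARM_CPU_MODE_FIQ, regno = 8.
 */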
2994static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
2995{
2996 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
2997 int tgtmode = 0, regno = 0;
2998
2999 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
3000 return;
3001 }
3002
3003 /* Sync state because msr_banked() can raise exceptions */
3004 gen_set_condexec(s);
43722a6d 3005 gen_set_pc_im(s, s->pc_curr);
8bfd0550
PM
3006 tcg_reg = load_reg(s, rn);
3007 tcg_tgtmode = tcg_const_i32(tgtmode);
3008 tcg_regno = tcg_const_i32(regno);
3009 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
3010 tcg_temp_free_i32(tcg_tgtmode);
3011 tcg_temp_free_i32(tcg_regno);
3012 tcg_temp_free_i32(tcg_reg);
dcba3a8d 3013 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
3014}
3015
3016static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
3017{
3018 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
3019 int tgtmode = 0, regno = 0;
3020
3021 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
3022 return;
3023 }
3024
3025 /* Sync state because mrs_banked() can raise exceptions */
3026 gen_set_condexec(s);
43722a6d 3027 gen_set_pc_im(s, s->pc_curr);
8bfd0550
PM
3028 tcg_reg = tcg_temp_new_i32();
3029 tcg_tgtmode = tcg_const_i32(tgtmode);
3030 tcg_regno = tcg_const_i32(regno);
3031 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
3032 tcg_temp_free_i32(tcg_tgtmode);
3033 tcg_temp_free_i32(tcg_regno);
3034 store_reg(s, rn, tcg_reg);
dcba3a8d 3035 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
3036}
3037
fb0e8e79
PM
3038/* Store value to PC as for an exception return (i.e. don't
3039 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
3040 * will do the masking based on the new value of the Thumb bit.
3041 */
3042static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3043{
fb0e8e79
PM
3044 tcg_gen_mov_i32(cpu_R[15], pc);
3045 tcg_temp_free_i32(pc);
b5ff1b31
FB
3046}
3047
b0109805 3048/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 3049static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 3050{
fb0e8e79
PM
3051 store_pc_exc_ret(s, pc);
3052 /* The cpsr_write_eret helper will mask the low bits of PC
3053 * appropriately depending on the new Thumb bit, so it must
3054 * be called after storing the new PC.
3055 */
e69ad9df
AL
3056 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
3057 gen_io_start();
3058 }
235ea1f5 3059 gen_helper_cpsr_write_eret(cpu_env, cpsr);
7d1b0095 3060 tcg_temp_free_i32(cpsr);
b29fd33d 3061 /* Must exit loop to check un-masked IRQs */
dcba3a8d 3062 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb 3063}
3b46e624 3064
fb0e8e79
PM
3065/* Generate an old-style exception return. Marks pc as dead. */
3066static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
3067{
3068 gen_rfe(s, pc, load_cpu_field(spsr));
3069}
3070
c22edfeb
AB
3071/*
3072 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
3073 * only call the helper when running single threaded TCG code to ensure
3074 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
3075 * just skip this instruction. Currently the SEV/SEVL instructions
3076 * which are *one* of many ways to wake the CPU from WFE are not
3077 * implemented so we can't sleep like WFI does.
3078 */
9ee6e8bb
PB
3079static void gen_nop_hint(DisasContext *s, int val)
3080{
3081 switch (val) {
2399d4e7
EC
3082 /* When running in MTTCG we don't generate jumps to the yield and
3083 * WFE helpers as it won't affect the scheduling of other vCPUs.
3084 * If we wanted to more completely model WFE/SEV so we don't busy
3085 * spin unnecessarily we would need to do something more involved.
3086 */
c87e5a61 3087 case 1: /* yield */
2399d4e7 3088 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
a0415916 3089 gen_set_pc_im(s, s->base.pc_next);
dcba3a8d 3090 s->base.is_jmp = DISAS_YIELD;
c22edfeb 3091 }
c87e5a61 3092 break;
9ee6e8bb 3093 case 3: /* wfi */
a0415916 3094 gen_set_pc_im(s, s->base.pc_next);
dcba3a8d 3095 s->base.is_jmp = DISAS_WFI;
9ee6e8bb
PB
3096 break;
3097 case 2: /* wfe */
2399d4e7 3098 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
a0415916 3099 gen_set_pc_im(s, s->base.pc_next);
dcba3a8d 3100 s->base.is_jmp = DISAS_WFE;
c22edfeb 3101 }
72c1d3af 3102 break;
9ee6e8bb 3103 case 4: /* sev */
12b10571
MR
3104 case 5: /* sevl */
3105 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
3106 default: /* nop */
3107 break;
3108 }
3109}
99c475ab 3110
ad69471c 3111#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3112
39d5492a 3113static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3114{
3115 switch (size) {
dd8fbd78
FN
3116 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3117 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3118 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3119 default: abort();
9ee6e8bb 3120 }
9ee6e8bb
PB
3121}
3122
39d5492a 3123static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3124{
3125 switch (size) {
dd8fbd78
FN
3126 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3127 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3128 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3129 default: return;
3130 }
3131}
3132
3133/* 32-bit pairwise ops end up the same as the elementwise versions. */
9ecd3c5c
RH
3134#define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
3135#define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
3136#define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
3137#define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
ad69471c 3138
ad69471c
PB
3139#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3140 switch ((size << 1) | u) { \
3141 case 0: \
dd8fbd78 3142 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3143 break; \
3144 case 1: \
dd8fbd78 3145 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3146 break; \
3147 case 2: \
dd8fbd78 3148 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3149 break; \
3150 case 3: \
dd8fbd78 3151 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3152 break; \
3153 case 4: \
dd8fbd78 3154 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3155 break; \
3156 case 5: \
dd8fbd78 3157 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3158 break; \
3159 default: return 1; \
3160 }} while (0)
9ee6e8bb
PB
3161
3162#define GEN_NEON_INTEGER_OP(name) do { \
3163 switch ((size << 1) | u) { \
ad69471c 3164 case 0: \
dd8fbd78 3165 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3166 break; \
3167 case 1: \
dd8fbd78 3168 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3169 break; \
3170 case 2: \
dd8fbd78 3171 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3172 break; \
3173 case 3: \
dd8fbd78 3174 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3175 break; \
3176 case 4: \
dd8fbd78 3177 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3178 break; \
3179 case 5: \
dd8fbd78 3180 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3181 break; \
9ee6e8bb
PB
3182 default: return 1; \
3183 }} while (0)
3184
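/*
 * In both macros the switch key is (size << 1) | u; e.g. size = 1,
 * u = 0 selects case 2, the signed 16-bit helper.
 */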
39d5492a 3185static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 3186{
39d5492a 3187 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
3188 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3189 return tmp;
9ee6e8bb
PB
3190}
3191
39d5492a 3192static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 3193{
dd8fbd78 3194 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3195 tcg_temp_free_i32(var);
9ee6e8bb
PB
3196}
3197
39d5492a 3198static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 3199{
39d5492a 3200 TCGv_i32 tmp;
9ee6e8bb 3201 if (size == 1) {
0fad6efc
PM
3202 tmp = neon_load_reg(reg & 7, reg >> 4);
3203 if (reg & 8) {
dd8fbd78 3204 gen_neon_dup_high16(tmp);
0fad6efc
PM
3205 } else {
3206 gen_neon_dup_low16(tmp);
dd8fbd78 3207 }
0fad6efc
PM
3208 } else {
3209 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3210 }
dd8fbd78 3211 return tmp;
9ee6e8bb
PB
3212}
3213
02acedf9 3214static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3215{
b13708bb
RH
3216 TCGv_ptr pd, pm;
3217
600b828c 3218 if (!q && size == 2) {
02acedf9
PM
3219 return 1;
3220 }
b13708bb
RH
3221 pd = vfp_reg_ptr(true, rd);
3222 pm = vfp_reg_ptr(true, rm);
02acedf9
PM
3223 if (q) {
3224 switch (size) {
3225 case 0:
b13708bb 3226 gen_helper_neon_qunzip8(pd, pm);
02acedf9
PM
3227 break;
3228 case 1:
b13708bb 3229 gen_helper_neon_qunzip16(pd, pm);
02acedf9
PM
3230 break;
3231 case 2:
b13708bb 3232 gen_helper_neon_qunzip32(pd, pm);
02acedf9
PM
3233 break;
3234 default:
3235 abort();
3236 }
3237 } else {
3238 switch (size) {
3239 case 0:
b13708bb 3240 gen_helper_neon_unzip8(pd, pm);
02acedf9
PM
3241 break;
3242 case 1:
b13708bb 3243 gen_helper_neon_unzip16(pd, pm);
02acedf9
PM
3244 break;
3245 default:
3246 abort();
3247 }
3248 }
b13708bb
RH
3249 tcg_temp_free_ptr(pd);
3250 tcg_temp_free_ptr(pm);
02acedf9 3251 return 0;
19457615
FN
3252}
3253
d68a6f3a 3254static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 3255{
b13708bb
RH
3256 TCGv_ptr pd, pm;
3257
600b828c 3258 if (!q && size == 2) {
d68a6f3a
PM
3259 return 1;
3260 }
b13708bb
RH
3261 pd = vfp_reg_ptr(true, rd);
3262 pm = vfp_reg_ptr(true, rm);
d68a6f3a
PM
3263 if (q) {
3264 switch (size) {
3265 case 0:
b13708bb 3266 gen_helper_neon_qzip8(pd, pm);
d68a6f3a
PM
3267 break;
3268 case 1:
b13708bb 3269 gen_helper_neon_qzip16(pd, pm);
d68a6f3a
PM
3270 break;
3271 case 2:
b13708bb 3272 gen_helper_neon_qzip32(pd, pm);
d68a6f3a
PM
3273 break;
3274 default:
3275 abort();
3276 }
3277 } else {
3278 switch (size) {
3279 case 0:
b13708bb 3280 gen_helper_neon_zip8(pd, pm);
d68a6f3a
PM
3281 break;
3282 case 1:
b13708bb 3283 gen_helper_neon_zip16(pd, pm);
d68a6f3a
PM
3284 break;
3285 default:
3286 abort();
3287 }
3288 }
b13708bb
RH
3289 tcg_temp_free_ptr(pd);
3290 tcg_temp_free_ptr(pm);
d68a6f3a 3291 return 0;
19457615
FN
3292}
3293
39d5492a 3294static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 3295{
39d5492a 3296 TCGv_i32 rd, tmp;
19457615 3297
7d1b0095
PM
3298 rd = tcg_temp_new_i32();
3299 tmp = tcg_temp_new_i32();
19457615
FN
3300
3301 tcg_gen_shli_i32(rd, t0, 8);
3302 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3303 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3304 tcg_gen_or_i32(rd, rd, tmp);
3305
3306 tcg_gen_shri_i32(t1, t1, 8);
3307 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3308 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3309 tcg_gen_or_i32(t1, t1, tmp);
3310 tcg_gen_mov_i32(t0, rd);
3311
7d1b0095
PM
3312 tcg_temp_free_i32(tmp);
3313 tcg_temp_free_i32(rd);
19457615
FN
3314}
3315
39d5492a 3316static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 3317{
39d5492a 3318 TCGv_i32 rd, tmp;
19457615 3319
7d1b0095
PM
3320 rd = tcg_temp_new_i32();
3321 tmp = tcg_temp_new_i32();
19457615
FN
3322
3323 tcg_gen_shli_i32(rd, t0, 16);
3324 tcg_gen_andi_i32(tmp, t1, 0xffff);
3325 tcg_gen_or_i32(rd, rd, tmp);
3326 tcg_gen_shri_i32(t1, t1, 16);
3327 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3328 tcg_gen_or_i32(t1, t1, tmp);
3329 tcg_gen_mov_i32(t0, rd);
3330
7d1b0095
PM
3331 tcg_temp_free_i32(tmp);
3332 tcg_temp_free_i32(rd);
19457615
FN
3333}
3334
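/*
 * These are the 8-bit and 16-bit transpose steps used for VTRN; e.g.
 * with t0 = 0xb3b2b1b0 and t1 = 0xc3c2c1c0, gen_neon_trn_u8 leaves
 * t0 = 0xb2c2b0c0 and t1 = 0xb3c3b1c1.
 */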
3335
9ee6e8bb
PB
3336static struct {
3337 int nregs;
3338 int interleave;
3339 int spacing;
308e5636 3340} const neon_ls_element_type[11] = {
ac55d007
RH
3341 {1, 4, 1},
3342 {1, 4, 2},
9ee6e8bb 3343 {4, 1, 1},
ac55d007
RH
3344 {2, 2, 2},
3345 {1, 3, 1},
3346 {1, 3, 2},
9ee6e8bb
PB
3347 {3, 1, 1},
3348 {1, 1, 1},
ac55d007
RH
3349 {1, 2, 1},
3350 {1, 2, 2},
9ee6e8bb
PB
3351 {2, 1, 1}
3352};
3353
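/*
 * For example, entry 2, {4, 1, 1}, transfers four sequential D
 * registers with no interleaving (the VLD1/VST1 four-register form),
 * while entry 0, {1, 4, 1}, interleaves elements from four
 * consecutive registers (VLD4/VST4).
 */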
3354/* Translate a NEON load/store element instruction. Returns nonzero if the
3355 instruction is invalid. */
7dcc1f89 3356static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
3357{
3358 int rd, rn, rm;
3359 int op;
3360 int nregs;
3361 int interleave;
84496233 3362 int spacing;
9ee6e8bb
PB
3363 int stride;
3364 int size;
3365 int reg;
9ee6e8bb 3366 int load;
9ee6e8bb 3367 int n;
7377c2c9 3368 int vec_size;
ac55d007 3369 int mmu_idx;
14776ab5 3370 MemOp endian;
39d5492a
PM
3371 TCGv_i32 addr;
3372 TCGv_i32 tmp;
3373 TCGv_i32 tmp2;
84496233 3374 TCGv_i64 tmp64;
9ee6e8bb 3375
2c7ffc41
PM
3376 /* FIXME: this access check should not take precedence over UNDEF
3377 * for invalid encodings; we will generate incorrect syndrome information
3378 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3379 */
9dbbc748 3380 if (s->fp_excp_el) {
a767fac8 3381 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
4be42f40 3382 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3383 return 0;
3384 }
3385
5df8bac1 3386 if (!s->vfp_enabled)
9ee6e8bb
PB
3387 return 1;
3388 VFP_DREG_D(rd, insn);
3389 rn = (insn >> 16) & 0xf;
3390 rm = insn & 0xf;
3391 load = (insn & (1 << 21)) != 0;
ac55d007
RH
3392 endian = s->be_data;
3393 mmu_idx = get_mem_index(s);
9ee6e8bb
PB
3394 if ((insn & (1 << 23)) == 0) {
3395 /* Load store all elements. */
3396 op = (insn >> 8) & 0xf;
3397 size = (insn >> 6) & 3;
84496233 3398 if (op > 10)
9ee6e8bb 3399 return 1;
f2dd89d0
PM
3400 /* Catch UNDEF cases for bad values of align field */
3401 switch (op & 0xc) {
3402 case 4:
3403 if (((insn >> 5) & 1) == 1) {
3404 return 1;
3405 }
3406 break;
3407 case 8:
3408 if (((insn >> 4) & 3) == 3) {
3409 return 1;
3410 }
3411 break;
3412 default:
3413 break;
3414 }
9ee6e8bb
PB
3415 nregs = neon_ls_element_type[op].nregs;
3416 interleave = neon_ls_element_type[op].interleave;
84496233 3417 spacing = neon_ls_element_type[op].spacing;
ac55d007 3418 if (size == 3 && (interleave | spacing) != 1) {
84496233 3419 return 1;
ac55d007 3420 }
e23f12b3
RH
3421 /* For our purposes, bytes are always little-endian. */
3422 if (size == 0) {
3423 endian = MO_LE;
3424 }
3425 /* Consecutive little-endian elements from a single register
3426 * can be promoted to a larger little-endian operation.
3427 */
3428 if (interleave == 1 && endian == MO_LE) {
3429 size = 3;
3430 }
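        /*
         * For example, a one-register VLD1 of 8-bit elements is then
         * performed as a single 64-bit little-endian load instead of
         * eight byte loads.
         */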
ac55d007 3431 tmp64 = tcg_temp_new_i64();
e318a60b 3432 addr = tcg_temp_new_i32();
ac55d007 3433 tmp2 = tcg_const_i32(1 << size);
dcc65026 3434 load_reg_var(s, addr, rn);
9ee6e8bb 3435 for (reg = 0; reg < nregs; reg++) {
ac55d007
RH
3436 for (n = 0; n < 8 >> size; n++) {
3437 int xs;
3438 for (xs = 0; xs < interleave; xs++) {
3439 int tt = rd + reg + spacing * xs;
3440
3441 if (load) {
3442 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
3443 neon_store_element64(tt, n, size, tmp64);
3444 } else {
3445 neon_load_element64(tmp64, tt, n, size);
3446 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
9ee6e8bb 3447 }
ac55d007 3448 tcg_gen_add_i32(addr, addr, tmp2);
9ee6e8bb
PB
3449 }
3450 }
9ee6e8bb 3451 }
e318a60b 3452 tcg_temp_free_i32(addr);
ac55d007
RH
3453 tcg_temp_free_i32(tmp2);
3454 tcg_temp_free_i64(tmp64);
3455 stride = nregs * interleave * 8;
9ee6e8bb
PB
3456 } else {
3457 size = (insn >> 10) & 3;
3458 if (size == 3) {
3459 /* Load single element to all lanes. */
8e18cde3
PM
3460 int a = (insn >> 4) & 1;
3461 if (!load) {
9ee6e8bb 3462 return 1;
8e18cde3 3463 }
9ee6e8bb
PB
3464 size = (insn >> 6) & 3;
3465 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
3466
3467 if (size == 3) {
3468 if (nregs != 4 || a == 0) {
9ee6e8bb 3469 return 1;
99c475ab 3470 }
8e18cde3
PM
3471 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3472 size = 2;
3473 }
3474 if (nregs == 1 && a == 1 && size == 0) {
3475 return 1;
3476 }
3477 if (nregs == 3 && a == 1) {
3478 return 1;
3479 }
e318a60b 3480 addr = tcg_temp_new_i32();
8e18cde3 3481 load_reg_var(s, addr, rn);
7377c2c9
RH
3482
3483 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
3484 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
3485 */
3486 stride = (insn & (1 << 5)) ? 2 : 1;
3487 vec_size = nregs == 1 ? stride * 8 : 8;
3488
3489 tmp = tcg_temp_new_i32();
3490 for (reg = 0; reg < nregs; reg++) {
3491 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
3492 s->be_data | size);
3493 if ((rd & 1) && vec_size == 16) {
3494 /* We cannot write 16 bytes at once because the
3495 * destination is unaligned.
3496 */
3497 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
3498 8, 8, tmp);
3499 tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
3500 neon_reg_offset(rd, 0), 8, 8);
3501 } else {
3502 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
3503 vec_size, vec_size, tmp);
8e18cde3 3504 }
7377c2c9
RH
3505 tcg_gen_addi_i32(addr, addr, 1 << size);
3506 rd += stride;
9ee6e8bb 3507 }
7377c2c9 3508 tcg_temp_free_i32(tmp);
e318a60b 3509 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3510 stride = (1 << size) * nregs;
3511 } else {
3512 /* Single element. */
93262b16 3513 int idx = (insn >> 4) & 0xf;
2d6ac920 3514 int reg_idx;
9ee6e8bb
PB
3515 switch (size) {
3516 case 0:
2d6ac920 3517 reg_idx = (insn >> 5) & 7;
9ee6e8bb
PB
3518 stride = 1;
3519 break;
3520 case 1:
2d6ac920 3521 reg_idx = (insn >> 6) & 3;
9ee6e8bb
PB
3522 stride = (insn & (1 << 5)) ? 2 : 1;
3523 break;
3524 case 2:
2d6ac920 3525 reg_idx = (insn >> 7) & 1;
9ee6e8bb
PB
3526 stride = (insn & (1 << 6)) ? 2 : 1;
3527 break;
3528 default:
3529 abort();
3530 }
3531 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
3532 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3533 switch (nregs) {
3534 case 1:
3535 if (((idx & (1 << size)) != 0) ||
3536 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
3537 return 1;
3538 }
3539 break;
3540 case 3:
3541 if ((idx & 1) != 0) {
3542 return 1;
3543 }
3544 /* fall through */
3545 case 2:
3546 if (size == 2 && (idx & 2) != 0) {
3547 return 1;
3548 }
3549 break;
3550 case 4:
3551 if ((size == 2) && ((idx & 3) == 3)) {
3552 return 1;
3553 }
3554 break;
3555 default:
3556 abort();
3557 }
3558 if ((rd + stride * (nregs - 1)) > 31) {
3559 /* Attempts to write off the end of the register file
3560 * are UNPREDICTABLE; we choose to UNDEF because otherwise
3561 * the neon_load_reg() would write off the end of the array.
3562 */
3563 return 1;
3564 }
2d6ac920 3565 tmp = tcg_temp_new_i32();
e318a60b 3566 addr = tcg_temp_new_i32();
dcc65026 3567 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3568 for (reg = 0; reg < nregs; reg++) {
3569 if (load) {
2d6ac920
RH
3570 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
3571 s->be_data | size);
3572 neon_store_element(rd, reg_idx, size, tmp);
9ee6e8bb 3573 } else { /* Store */
2d6ac920
RH
3574 neon_load_element(tmp, rd, reg_idx, size);
3575 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
3576 s->be_data | size);
99c475ab 3577 }
9ee6e8bb 3578 rd += stride;
1b2b1e54 3579 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 3580 }
e318a60b 3581 tcg_temp_free_i32(addr);
2d6ac920 3582 tcg_temp_free_i32(tmp);
9ee6e8bb 3583 stride = nregs * (1 << size);
99c475ab 3584 }
9ee6e8bb
PB
3585 }
3586 if (rm != 15) {
39d5492a 3587 TCGv_i32 base;
b26eefb6
PB
3588
3589 base = load_reg(s, rn);
9ee6e8bb 3590 if (rm == 13) {
b26eefb6 3591 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3592 } else {
39d5492a 3593 TCGv_i32 index;
b26eefb6
PB
3594 index = load_reg(s, rm);
3595 tcg_gen_add_i32(base, base, index);
7d1b0095 3596 tcg_temp_free_i32(index);
9ee6e8bb 3597 }
b26eefb6 3598 store_reg(s, rn, base);
9ee6e8bb
PB
3599 }
3600 return 0;
3601}
3b46e624 3602
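/* Helpers for the Neon narrowing operations: each one converts a 64-bit
 * source into 32 bits of half-width result elements, optionally with
 * signed or unsigned saturation.
 */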
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}

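/* Variable shift used on the narrowing paths: q selects the rounding
 * (VRSHL-style) helpers and u the unsigned ones.
 */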
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters;
     * free them here.
     */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}

/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

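/* Each entry has bit n set if the insn allows size value n; op/size
 * combinations whose bit is clear UNDEF below.
 */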
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};

/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF).  Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};

/* Expand v8.1 simd helper.  */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}

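/* Expanders for SSRA (signed shift right and accumulate):
 * d += a >> shift, per element.  The .fni4/.fni8 functions below handle
 * the scalar TCG fallback, .fniv the host vector case.
 */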
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_ssra[] = {
    INDEX_op_sari_vec, INDEX_op_add_vec, 0
};

const GVecGen2i ssra_op[4] = {
    { .fni8 = gen_ssra8_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_8 },
    { .fni8 = gen_ssra16_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_16 },
    { .fni4 = gen_ssra32_i32,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_32 },
    { .fni8 = gen_ssra64_i64,
      .fniv = gen_ssra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_ssra,
      .load_dest = true,
      .vece = MO_64 },
};

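/* Expanders for USRA (unsigned shift right and accumulate):
 * d += a >> shift, with a logical rather than arithmetic shift.
 */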
static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_usra[] = {
    INDEX_op_shri_vec, INDEX_op_add_vec, 0
};

const GVecGen2i usra_op[4] = {
    { .fni8 = gen_usra8_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_8, },
    { .fni8 = gen_usra16_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_16, },
    { .fni4 = gen_usra32_i32,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_32, },
    { .fni8 = gen_usra64_i64,
      .fniv = gen_usra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_64, },
};

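/* Expanders for SRI (shift right and insert): the shifted value lands
 * in the low (element_bits - shift) bits of d; the top 'shift' bits of
 * d are unchanged.
 */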
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
        tcg_gen_shri_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };

const GVecGen2i sri_op[4] = {
    { .fni8 = gen_shr8_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_8 },
    { .fni8 = gen_shr16_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_16 },
    { .fni4 = gen_shr32_ins_i32,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_32 },
    { .fni8 = gen_shr64_ins_i64,
      .fniv = gen_shr_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_64 },
};

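/* Expanders for SLI (shift left and insert): the shifted value lands
 * in the high bits of d; the low 'shift' bits of d are unchanged.
 */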
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
        tcg_gen_shli_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };

const GVecGen2i sli_op[4] = {
    { .fni8 = gen_shl8_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_8 },
    { .fni8 = gen_shl16_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_16 },
    { .fni4 = gen_shl32_ins_i32,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_32 },
    { .fni8 = gen_shl64_ins_i64,
      .fniv = gen_shl_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_64 },
};

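/* Expanders for VMLA and VMLS: d += a * b and d -= a * b, per element. */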
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}

/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */

static const TCGOpcode vecop_list_mla[] = {
    INDEX_op_mul_vec, INDEX_op_add_vec, 0
};

static const TCGOpcode vecop_list_mls[] = {
    INDEX_op_mul_vec, INDEX_op_sub_vec, 0
};

const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_64 },
};

const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_64 },
};

/* CMTST: test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };

const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_64 },
};

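/* In the saturating add/sub expanders below, 'sat' aliases the vfp.qc
 * field (passed via write_aofs); its bits are set wherever the
 * saturating result differs from the wrapping one, which is how QC
 * gets latched.
 */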
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqadd[] = {
    INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_64 },
};

static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqadd[] = {
    INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqsub[] = {
    INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqsub[] = {
    INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_64 },
};

/* Translate a NEON data processing instruction.  Return nonzero if the
 * instruction is invalid.
 * We process data in a mixture of 32-bit and 64-bit chunks.
 * Mostly we use 32-bit chunks so we can use normal scalar instructions.
 */
static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int vec_size;
    uint32_t imm;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_ptr ptr1, ptr2, ptr3;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        return 1;
    }
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    vec_size = q ? 16 : 8;
    rd_ofs = neon_reg_offset(rd, 0);
    rn_ofs = neon_reg_offset(rn, 0);
    rm_ofs = neon_reg_offset(rm, 0);

4670 if ((insn & (1 << 23)) == 0) {
4671 /* Three register same length. */
4672 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4673 /* Catch invalid op and bad size combinations: UNDEF */
4674 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4675 return 1;
4676 }
25f84f79
PM
4677 /* All insns of this form UNDEF for either this condition or the
4678 * superset of cases "Q==1"; we catch the latter later.
4679 */
4680 if (q && ((rd | rn | rm) & 1)) {
4681 return 1;
4682 }
36a71934
RH
4683 switch (op) {
4684 case NEON_3R_SHA:
4685 /* The SHA-1/SHA-256 3-register instructions require special
4686 * treatment here, as their size field is overloaded as an
4687 * op type selector, and they all consume their input in a
4688 * single pass.
4689 */
f1ecb913
AB
4690 if (!q) {
4691 return 1;
4692 }
4693 if (!u) { /* SHA-1 */
962fcbf2 4694 if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
4695 return 1;
4696 }
1a66ac61
RH
4697 ptr1 = vfp_reg_ptr(true, rd);
4698 ptr2 = vfp_reg_ptr(true, rn);
4699 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913 4700 tmp4 = tcg_const_i32(size);
1a66ac61 4701 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
f1ecb913
AB
4702 tcg_temp_free_i32(tmp4);
4703 } else { /* SHA-256 */
962fcbf2 4704 if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
f1ecb913
AB
4705 return 1;
4706 }
1a66ac61
RH
4707 ptr1 = vfp_reg_ptr(true, rd);
4708 ptr2 = vfp_reg_ptr(true, rn);
4709 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913
AB
4710 switch (size) {
4711 case 0:
1a66ac61 4712 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
f1ecb913
AB
4713 break;
4714 case 1:
1a66ac61 4715 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
f1ecb913
AB
4716 break;
4717 case 2:
1a66ac61 4718 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
f1ecb913
AB
4719 break;
4720 }
4721 }
1a66ac61
RH
4722 tcg_temp_free_ptr(ptr1);
4723 tcg_temp_free_ptr(ptr2);
4724 tcg_temp_free_ptr(ptr3);
f1ecb913 4725 return 0;
36a71934
RH
4726
4727 case NEON_3R_VPADD_VQRDMLAH:
4728 if (!u) {
4729 break; /* VPADD */
4730 }
4731 /* VQRDMLAH */
4732 switch (size) {
4733 case 1:
4734 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
4735 q, rd, rn, rm);
4736 case 2:
4737 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
4738 q, rd, rn, rm);
4739 }
4740 return 1;
4741
4742 case NEON_3R_VFM_VQRDMLSH:
4743 if (!u) {
4744 /* VFM, VFMS */
4745 if (size == 1) {
4746 return 1;
4747 }
4748 break;
4749 }
4750 /* VQRDMLSH */
4751 switch (size) {
4752 case 1:
4753 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
4754 q, rd, rn, rm);
4755 case 2:
4756 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
4757 q, rd, rn, rm);
4758 }
4759 return 1;
eabcd6fa
RH
4760
4761 case NEON_3R_LOGIC: /* Logic ops. */
4762 switch ((u << 2) | size) {
4763 case 0: /* VAND */
4764 tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
4765 vec_size, vec_size);
4766 break;
4767 case 1: /* VBIC */
4768 tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
4769 vec_size, vec_size);
4770 break;
2900847f
RH
4771 case 2: /* VORR */
4772 tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
4773 vec_size, vec_size);
eabcd6fa
RH
4774 break;
4775 case 3: /* VORN */
4776 tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
4777 vec_size, vec_size);
4778 break;
4779 case 4: /* VEOR */
4780 tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
4781 vec_size, vec_size);
4782 break;
4783 case 5: /* VBSL */
3a7a2b4e
RH
4784 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
4785 vec_size, vec_size);
eabcd6fa
RH
4786 break;
4787 case 6: /* VBIT */
3a7a2b4e
RH
4788 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
4789 vec_size, vec_size);
eabcd6fa
RH
4790 break;
4791 case 7: /* VBIF */
3a7a2b4e
RH
4792 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
4793 vec_size, vec_size);
eabcd6fa
RH
4794 break;
4795 }
4796 return 0;
e4717ae0
RH
4797
4798 case NEON_3R_VADD_VSUB:
4799 if (u) {
4800 tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
4801 vec_size, vec_size);
4802 } else {
4803 tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
4804 vec_size, vec_size);
4805 }
4806 return 0;
82083184 4807
89e68b57
RH
4808 case NEON_3R_VQADD:
4809 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
4810 rn_ofs, rm_ofs, vec_size, vec_size,
4811 (u ? uqadd_op : sqadd_op) + size);
2f143d3a 4812 return 0;
89e68b57
RH
4813
4814 case NEON_3R_VQSUB:
4815 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
4816 rn_ofs, rm_ofs, vec_size, vec_size,
4817 (u ? uqsub_op : sqsub_op) + size);
2f143d3a 4818 return 0;
89e68b57 4819
82083184
RH
4820 case NEON_3R_VMUL: /* VMUL */
4821 if (u) {
4822 /* Polynomial case allows only P8 and is handled below. */
4823 if (size != 0) {
4824 return 1;
4825 }
4826 } else {
4827 tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
4828 vec_size, vec_size);
4829 return 0;
4830 }
4831 break;
4a7832b0
RH
4832
4833 case NEON_3R_VML: /* VMLA, VMLS */
4834 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
4835 u ? &mls_op[size] : &mla_op[size]);
4836 return 0;
ea580fa3
RH
4837
4838 case NEON_3R_VTST_VCEQ:
4839 if (u) { /* VCEQ */
4840 tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
4841 vec_size, vec_size);
4842 } else { /* VTST */
4843 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
4844 vec_size, vec_size, &cmtst_op[size]);
4845 }
4846 return 0;
4847
4848 case NEON_3R_VCGT:
4849 tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
4850 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
4851 return 0;
4852
4853 case NEON_3R_VCGE:
4854 tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
4855 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
4856 return 0;
6f278221
RH
4857
4858 case NEON_3R_VMAX:
4859 if (u) {
4860 tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
4861 vec_size, vec_size);
4862 } else {
4863 tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
4864 vec_size, vec_size);
4865 }
4866 return 0;
4867 case NEON_3R_VMIN:
4868 if (u) {
4869 tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
4870 vec_size, vec_size);
4871 } else {
4872 tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
4873 vec_size, vec_size);
4874 }
4875 return 0;
f1ecb913 4876 }
4a7832b0 4877
eabcd6fa 4878 if (size == 3) {
62698be3 4879 /* 64-bit element instructions. */
9ee6e8bb 4880 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4881 neon_load_reg64(cpu_V0, rn + pass);
4882 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4883 switch (op) {
62698be3 4884 case NEON_3R_VSHL:
ad69471c
PB
4885 if (u) {
4886 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4887 } else {
4888 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4889 }
4890 break;
62698be3 4891 case NEON_3R_VQSHL:
ad69471c 4892 if (u) {
02da0b2d
PM
4893 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4894 cpu_V1, cpu_V0);
ad69471c 4895 } else {
02da0b2d
PM
4896 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4897 cpu_V1, cpu_V0);
ad69471c
PB
4898 }
4899 break;
62698be3 4900 case NEON_3R_VRSHL:
ad69471c
PB
4901 if (u) {
4902 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4903 } else {
ad69471c
PB
4904 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4905 }
4906 break;
62698be3 4907 case NEON_3R_VQRSHL:
ad69471c 4908 if (u) {
02da0b2d
PM
4909 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4910 cpu_V1, cpu_V0);
ad69471c 4911 } else {
02da0b2d
PM
4912 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4913 cpu_V1, cpu_V0);
1e8d4eec 4914 }
9ee6e8bb 4915 break;
9ee6e8bb
PB
4916 default:
4917 abort();
2c0262af 4918 }
ad69471c 4919 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4920 }
9ee6e8bb 4921 return 0;
2c0262af 4922 }
25f84f79 4923 pairwise = 0;
9ee6e8bb 4924 switch (op) {
62698be3
PM
4925 case NEON_3R_VSHL:
4926 case NEON_3R_VQSHL:
4927 case NEON_3R_VRSHL:
4928 case NEON_3R_VQRSHL:
9ee6e8bb 4929 {
ad69471c
PB
4930 int rtmp;
4931 /* Shift instruction operands are reversed. */
4932 rtmp = rn;
9ee6e8bb 4933 rn = rm;
ad69471c 4934 rm = rtmp;
9ee6e8bb 4935 }
2c0262af 4936 break;
36a71934 4937 case NEON_3R_VPADD_VQRDMLAH:
62698be3
PM
4938 case NEON_3R_VPMAX:
4939 case NEON_3R_VPMIN:
9ee6e8bb 4940 pairwise = 1;
2c0262af 4941 break;
25f84f79
PM
4942 case NEON_3R_FLOAT_ARITH:
4943 pairwise = (u && size < 2); /* if VPADD (float) */
4944 break;
4945 case NEON_3R_FLOAT_MINMAX:
4946 pairwise = u; /* if VPMIN/VPMAX (float) */
4947 break;
4948 case NEON_3R_FLOAT_CMP:
4949 if (!u && size) {
4950 /* no encoding for U=0 C=1x */
4951 return 1;
4952 }
4953 break;
4954 case NEON_3R_FLOAT_ACMP:
4955 if (!u) {
4956 return 1;
4957 }
4958 break;
505935fc
WN
4959 case NEON_3R_FLOAT_MISC:
4960 /* VMAXNM/VMINNM in ARMv8 */
d614a513 4961 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
4962 return 1;
4963 }
2c0262af 4964 break;
36a71934
RH
4965 case NEON_3R_VFM_VQRDMLSH:
4966 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
4967 return 1;
4968 }
4969 break;
9ee6e8bb 4970 default:
2c0262af 4971 break;
9ee6e8bb 4972 }
dd8fbd78 4973
25f84f79
PM
4974 if (pairwise && q) {
4975 /* All the pairwise insns UNDEF if Q is set */
4976 return 1;
4977 }
4978
9ee6e8bb
PB
4979 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4980
4981 if (pairwise) {
4982 /* Pairwise. */
a5a14945
JR
4983 if (pass < 1) {
4984 tmp = neon_load_reg(rn, 0);
4985 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4986 } else {
a5a14945
JR
4987 tmp = neon_load_reg(rm, 0);
4988 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4989 }
4990 } else {
4991 /* Elementwise. */
dd8fbd78
FN
4992 tmp = neon_load_reg(rn, pass);
4993 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4994 }
4995 switch (op) {
62698be3 4996 case NEON_3R_VHADD:
9ee6e8bb
PB
4997 GEN_NEON_INTEGER_OP(hadd);
4998 break;
62698be3 4999 case NEON_3R_VRHADD:
9ee6e8bb 5000 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5001 break;
62698be3 5002 case NEON_3R_VHSUB:
9ee6e8bb
PB
5003 GEN_NEON_INTEGER_OP(hsub);
5004 break;
62698be3 5005 case NEON_3R_VSHL:
ad69471c 5006 GEN_NEON_INTEGER_OP(shl);
2c0262af 5007 break;
62698be3 5008 case NEON_3R_VQSHL:
02da0b2d 5009 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5010 break;
62698be3 5011 case NEON_3R_VRSHL:
ad69471c 5012 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5013 break;
62698be3 5014 case NEON_3R_VQRSHL:
02da0b2d 5015 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5016 break;
62698be3 5017 case NEON_3R_VABD:
9ee6e8bb
PB
5018 GEN_NEON_INTEGER_OP(abd);
5019 break;
62698be3 5020 case NEON_3R_VABA:
9ee6e8bb 5021 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5022 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5023 tmp2 = neon_load_reg(rd, pass);
5024 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5025 break;
62698be3 5026 case NEON_3R_VMUL:
82083184
RH
5027 /* VMUL.P8; other cases already eliminated. */
5028 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb 5029 break;
62698be3 5030 case NEON_3R_VPMAX:
9ee6e8bb
PB
5031 GEN_NEON_INTEGER_OP(pmax);
5032 break;
62698be3 5033 case NEON_3R_VPMIN:
9ee6e8bb
PB
5034 GEN_NEON_INTEGER_OP(pmin);
5035 break;
62698be3 5036 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5037 if (!u) { /* VQDMULH */
5038 switch (size) {
02da0b2d
PM
5039 case 1:
5040 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5041 break;
5042 case 2:
5043 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5044 break;
62698be3 5045 default: abort();
9ee6e8bb 5046 }
62698be3 5047 } else { /* VQRDMULH */
9ee6e8bb 5048 switch (size) {
02da0b2d
PM
5049 case 1:
5050 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5051 break;
5052 case 2:
5053 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5054 break;
62698be3 5055 default: abort();
9ee6e8bb
PB
5056 }
5057 }
5058 break;
36a71934 5059 case NEON_3R_VPADD_VQRDMLAH:
9ee6e8bb 5060 switch (size) {
dd8fbd78
FN
5061 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5062 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5063 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5064 default: abort();
9ee6e8bb
PB
5065 }
5066 break;
62698be3 5067 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5068 {
5069 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5070 switch ((u << 2) | size) {
5071 case 0: /* VADD */
aa47cfdd
PM
5072 case 4: /* VPADD */
5073 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5074 break;
5075 case 2: /* VSUB */
aa47cfdd 5076 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5077 break;
5078 case 6: /* VABD */
aa47cfdd 5079 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5080 break;
5081 default:
62698be3 5082 abort();
9ee6e8bb 5083 }
aa47cfdd 5084 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5085 break;
aa47cfdd 5086 }
62698be3 5087 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5088 {
5089 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5090 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5091 if (!u) {
7d1b0095 5092 tcg_temp_free_i32(tmp2);
dd8fbd78 5093 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5094 if (size == 0) {
aa47cfdd 5095 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5096 } else {
aa47cfdd 5097 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5098 }
5099 }
aa47cfdd 5100 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5101 break;
aa47cfdd 5102 }
62698be3 5103 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5104 {
5105 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5106 if (!u) {
aa47cfdd 5107 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5108 } else {
aa47cfdd
PM
5109 if (size == 0) {
5110 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5111 } else {
5112 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5113 }
b5ff1b31 5114 }
aa47cfdd 5115 tcg_temp_free_ptr(fpstatus);
2c0262af 5116 break;
aa47cfdd 5117 }
62698be3 5118 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5119 {
5120 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5121 if (size == 0) {
5122 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5123 } else {
5124 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5125 }
5126 tcg_temp_free_ptr(fpstatus);
2c0262af 5127 break;
aa47cfdd 5128 }
62698be3 5129 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5130 {
5131 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5132 if (size == 0) {
f71a2ae5 5133 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5134 } else {
f71a2ae5 5135 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5136 }
5137 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5138 break;
aa47cfdd 5139 }
505935fc
WN
5140 case NEON_3R_FLOAT_MISC:
5141 if (u) {
5142 /* VMAXNM/VMINNM */
5143 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5144 if (size == 0) {
f71a2ae5 5145 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5146 } else {
f71a2ae5 5147 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5148 }
5149 tcg_temp_free_ptr(fpstatus);
5150 } else {
5151 if (size == 0) {
5152 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5153 } else {
5154 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5155 }
5156 }
2c0262af 5157 break;
36a71934 5158 case NEON_3R_VFM_VQRDMLSH:
da97f52c
PM
5159 {
5160 /* VFMA, VFMS: fused multiply-add */
5161 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5162 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5163 if (size) {
5164 /* VFMS */
5165 gen_helper_vfp_negs(tmp, tmp);
5166 }
5167 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5168 tcg_temp_free_i32(tmp3);
5169 tcg_temp_free_ptr(fpstatus);
5170 break;
5171 }
9ee6e8bb
PB
5172 default:
5173 abort();
2c0262af 5174 }
7d1b0095 5175 tcg_temp_free_i32(tmp2);
dd8fbd78 5176
9ee6e8bb
PB
5177 /* Save the result. For elementwise operations we can put it
5178 straight into the destination register. For pairwise operations
5179 we have to be careful to avoid clobbering the source operands. */
5180 if (pairwise && rd == rm) {
dd8fbd78 5181 neon_store_scratch(pass, tmp);
9ee6e8bb 5182 } else {
dd8fbd78 5183 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5184 }
5185
5186 } /* for pass */
5187 if (pairwise && rd == rm) {
5188 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5189 tmp = neon_load_scratch(pass);
5190 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5191 }
5192 }
ad69471c 5193 /* End of 3 register same size operations. */
9ee6e8bb
PB
5194 } else if (insn & (1 << 4)) {
5195 if ((insn & 0x00380080) != 0) {
5196 /* Two registers and shift. */
5197 op = (insn >> 8) & 0xf;
5198 if (insn & (1 << 7)) {
cc13115b
PM
5199 /* 64-bit shift. */
5200 if (op > 7) {
5201 return 1;
5202 }
9ee6e8bb
PB
5203 size = 3;
5204 } else {
5205 size = 2;
5206 while ((insn & (1 << (size + 19))) == 0)
5207 size--;
5208 }
5209 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
9ee6e8bb
PB
5210 if (op < 8) {
5211 /* Shift by immediate:
5212 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5213 if (q && ((rd | rm) & 1)) {
5214 return 1;
5215 }
5216 if (!u && (op == 4 || op == 6)) {
5217 return 1;
5218 }
9ee6e8bb
PB
5219 /* Right shifts are encoded as N - shift, where N is the
5220 element size in bits. */
1dc8425e 5221 if (op <= 4) {
9ee6e8bb 5222 shift = shift - (1 << (size + 3));
1dc8425e
RH
5223 }
5224
5225 switch (op) {
5226 case 0: /* VSHR */
5227 /* Right shift comes here negative. */
5228 shift = -shift;
5229 /* Shifts larger than the element size are architecturally
5230 * valid. Unsigned results in all zeros; signed results
5231 * in all sign bits.
5232 */
5233 if (!u) {
5234 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
5235 MIN(shift, (8 << size) - 1),
5236 vec_size, vec_size);
5237 } else if (shift >= 8 << size) {
5238 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5239 } else {
5240 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
5241 vec_size, vec_size);
5242 }
5243 return 0;
5244
41f6c113
RH
5245 case 1: /* VSRA */
5246 /* Right shift comes here negative. */
5247 shift = -shift;
5248 /* Shifts larger than the element size are architecturally
5249 * valid. Unsigned results in all zeros; signed results
5250 * in all sign bits.
5251 */
5252 if (!u) {
5253 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5254 MIN(shift, (8 << size) - 1),
5255 &ssra_op[size]);
5256 } else if (shift >= 8 << size) {
5257 /* rd += 0 */
5258 } else {
5259 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5260 shift, &usra_op[size]);
5261 }
5262 return 0;
5263
f3cd8218
RH
5264 case 4: /* VSRI */
5265 if (!u) {
5266 return 1;
5267 }
5268 /* Right shift comes here negative. */
5269 shift = -shift;
5270 /* Shift out of range leaves destination unchanged. */
5271 if (shift < 8 << size) {
5272 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5273 shift, &sri_op[size]);
5274 }
5275 return 0;
5276
1dc8425e 5277 case 5: /* VSHL, VSLI */
f3cd8218
RH
5278 if (u) { /* VSLI */
5279 /* Shift out of range leaves destination unchanged. */
5280 if (shift < 8 << size) {
5281 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
5282 vec_size, shift, &sli_op[size]);
5283 }
5284 } else { /* VSHL */
1dc8425e
RH
5285 /* Shifts larger than the element size are
 5286 * architecturally valid and result in zero.
5287 */
5288 if (shift >= 8 << size) {
5289 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5290 } else {
5291 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
5292 vec_size, vec_size);
5293 }
1dc8425e 5294 }
f3cd8218 5295 return 0;
1dc8425e
RH
5296 }
5297
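/* Ops 2, 3, 6 and 7 (VRSHR, VRSRA, VQSHLU, VQSHL) have no gvec
 * expansion above and fall through to the per-element loop below.
 */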
9ee6e8bb
PB
5298 if (size == 3) {
5299 count = q + 1;
5300 } else {
5301 count = q ? 4: 2;
5302 }
1dc8425e
RH
5303
5304 /* To avoid excessive duplication of ops we implement shift
5305 * by immediate using the variable shift operations.
5306 */
5307 imm = dup_const(size, shift);
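/* Example: size == 1, shift == -3 gives imm == 0xfffdfffdfffdfffd,
 * i.e. the 16-bit count -3 replicated into every lane; per the
 * usual Neon VSHL semantics the variable-shift helpers treat a
 * negative per-lane count as a right shift.
 */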
9ee6e8bb
PB
5308
5309 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5310 if (size == 3) {
5311 neon_load_reg64(cpu_V0, rm + pass);
5312 tcg_gen_movi_i64(cpu_V1, imm);
5313 switch (op) {
ad69471c
PB
5314 case 2: /* VRSHR */
5315 case 3: /* VRSRA */
5316 if (u)
5317 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5318 else
ad69471c 5319 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5320 break;
0322b26e 5321 case 6: /* VQSHLU */
02da0b2d
PM
5322 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5323 cpu_V0, cpu_V1);
ad69471c 5324 break;
0322b26e
PM
5325 case 7: /* VQSHL */
5326 if (u) {
02da0b2d 5327 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5328 cpu_V0, cpu_V1);
5329 } else {
02da0b2d 5330 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5331 cpu_V0, cpu_V1);
5332 }
9ee6e8bb 5333 break;
1dc8425e
RH
5334 default:
5335 g_assert_not_reached();
9ee6e8bb 5336 }
41f6c113 5337 if (op == 3) {
ad69471c 5338 /* Accumulate. */
5371cb81 5339 neon_load_reg64(cpu_V1, rd + pass);
ad69471c 5340 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5341 }
5342 neon_store_reg64(cpu_V0, rd + pass);
5343 } else { /* size < 3 */
5344 /* Operands in T0 and T1. */
dd8fbd78 5345 tmp = neon_load_reg(rm, pass);
7d1b0095 5346 tmp2 = tcg_temp_new_i32();
dd8fbd78 5347 tcg_gen_movi_i32(tmp2, imm);
ad69471c 5348 switch (op) {
ad69471c
PB
5349 case 2: /* VRSHR */
5350 case 3: /* VRSRA */
5351 GEN_NEON_INTEGER_OP(rshl);
5352 break;
0322b26e 5353 case 6: /* VQSHLU */
ad69471c 5354 switch (size) {
0322b26e 5355 case 0:
02da0b2d
PM
5356 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5357 tmp, tmp2);
0322b26e
PM
5358 break;
5359 case 1:
02da0b2d
PM
5360 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5361 tmp, tmp2);
0322b26e
PM
5362 break;
5363 case 2:
02da0b2d
PM
5364 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5365 tmp, tmp2);
0322b26e
PM
5366 break;
5367 default:
cc13115b 5368 abort();
ad69471c
PB
5369 }
5370 break;
0322b26e 5371 case 7: /* VQSHL */
02da0b2d 5372 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5373 break;
1dc8425e
RH
5374 default:
5375 g_assert_not_reached();
ad69471c 5376 }
7d1b0095 5377 tcg_temp_free_i32(tmp2);
ad69471c 5378
41f6c113 5379 if (op == 3) {
ad69471c 5380 /* Accumulate. */
dd8fbd78 5381 tmp2 = neon_load_reg(rd, pass);
5371cb81 5382 gen_neon_add(size, tmp, tmp2);
7d1b0095 5383 tcg_temp_free_i32(tmp2);
ad69471c 5384 }
dd8fbd78 5385 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5386 }
5387 } /* for pass */
5388 } else if (op < 10) {
ad69471c 5389 /* Shift by immediate and narrow:
9ee6e8bb 5390 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5391 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5392 if (rm & 1) {
5393 return 1;
5394 }
9ee6e8bb
PB
5395 shift = shift - (1 << (size + 3));
5396 size++;
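/* The operation narrows 2*esize -> esize: the (negative, i.e.
 * rightward) shift count was computed at the narrow size, and
 * bumping size here makes the helpers below operate on the
 * double-width input elements.
 */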
92cdfaeb 5397 if (size == 3) {
a7812ae4 5398 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5399 neon_load_reg64(cpu_V0, rm);
5400 neon_load_reg64(cpu_V1, rm + 1);
5401 for (pass = 0; pass < 2; pass++) {
5402 TCGv_i64 in;
5403 if (pass == 0) {
5404 in = cpu_V0;
5405 } else {
5406 in = cpu_V1;
5407 }
ad69471c 5408 if (q) {
0b36f4cd 5409 if (input_unsigned) {
92cdfaeb 5410 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5411 } else {
92cdfaeb 5412 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5413 }
ad69471c 5414 } else {
0b36f4cd 5415 if (input_unsigned) {
92cdfaeb 5416 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5417 } else {
92cdfaeb 5418 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5419 }
ad69471c 5420 }
7d1b0095 5421 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5422 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5423 neon_store_reg(rd, pass, tmp);
5424 } /* for pass */
5425 tcg_temp_free_i64(tmp64);
5426 } else {
5427 if (size == 1) {
5428 imm = (uint16_t)shift;
5429 imm |= imm << 16;
2c0262af 5430 } else {
92cdfaeb
PM
5431 /* size == 2 */
5432 imm = (uint32_t)shift;
5433 }
5434 tmp2 = tcg_const_i32(imm);
5435 tmp4 = neon_load_reg(rm + 1, 0);
5436 tmp5 = neon_load_reg(rm + 1, 1);
5437 for (pass = 0; pass < 2; pass++) {
5438 if (pass == 0) {
5439 tmp = neon_load_reg(rm, 0);
5440 } else {
5441 tmp = tmp4;
5442 }
0b36f4cd
CL
5443 gen_neon_shift_narrow(size, tmp, tmp2, q,
5444 input_unsigned);
92cdfaeb
PM
5445 if (pass == 0) {
5446 tmp3 = neon_load_reg(rm, 1);
5447 } else {
5448 tmp3 = tmp5;
5449 }
0b36f4cd
CL
5450 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5451 input_unsigned);
36aa55dc 5452 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5453 tcg_temp_free_i32(tmp);
5454 tcg_temp_free_i32(tmp3);
5455 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5456 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5457 neon_store_reg(rd, pass, tmp);
5458 } /* for pass */
c6067f04 5459 tcg_temp_free_i32(tmp2);
b75263d6 5460 }
9ee6e8bb 5461 } else if (op == 10) {
cc13115b
PM
5462 /* VSHLL, VMOVL */
5463 if (q || (rd & 1)) {
9ee6e8bb 5464 return 1;
cc13115b 5465 }
ad69471c
PB
5466 tmp = neon_load_reg(rm, 0);
5467 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5468 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5469 if (pass == 1)
5470 tmp = tmp2;
5471
5472 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5473
9ee6e8bb
PB
5474 if (shift != 0) {
5475 /* The shift is less than the width of the source
ad69471c
PB
5476 type, so we can just shift the whole register. */
5477 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5478 /* Widen the result of shift: we need to clear
5479 * the potential overflow bits resulting from
 5480 * left bits of one narrow input appearing as
 5481 * right bits of its left-hand neighbour narrow
 5482 * input. */
ad69471c
PB
5483 if (size < 2 || !u) {
5484 uint64_t imm64;
5485 if (size == 0) {
5486 imm = (0xffu >> (8 - shift));
5487 imm |= imm << 16;
acdf01ef 5488 } else if (size == 1) {
ad69471c 5489 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5490 } else {
5491 /* size == 2 */
5492 imm = 0xffffffff >> (32 - shift);
5493 }
5494 if (size < 2) {
5495 imm64 = imm | (((uint64_t)imm) << 32);
5496 } else {
5497 imm64 = imm;
9ee6e8bb 5498 }
acdf01ef 5499 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
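/* Example: size == 0, shift == 3, signed input: each byte was
 * sign-extended into a 16-bit lane, and the shift above pushed
 * each lane's top three bits into bits [2:0] of the lane above
 * it; here imm64 == 0x0007000700070007, so the AND clears
 * exactly those bits.
 */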
9ee6e8bb
PB
5500 }
5501 }
ad69471c 5502 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5503 }
f73534a5 5504 } else if (op >= 14) {
9ee6e8bb 5505 /* VCVT fixed-point. */
c253dd78
PM
5506 TCGv_ptr fpst;
5507 TCGv_i32 shiftv;
5508 VFPGenFixPointFn *fn;
5509
cc13115b
PM
5510 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5511 return 1;
5512 }
c253dd78
PM
5513
5514 if (!(op & 1)) {
5515 if (u) {
5516 fn = gen_helper_vfp_ultos;
5517 } else {
5518 fn = gen_helper_vfp_sltos;
5519 }
5520 } else {
5521 if (u) {
5522 fn = gen_helper_vfp_touls_round_to_zero;
5523 } else {
5524 fn = gen_helper_vfp_tosls_round_to_zero;
5525 }
5526 }
5527
f73534a5
PM
5528 /* We have already masked out the must-be-1 top bit of imm6,
5529 * hence this 32-shift where the ARM ARM has 64-imm6.
5530 */
5531 shift = 32 - shift;
c253dd78
PM
5532 fpst = get_fpstatus_ptr(1);
5533 shiftv = tcg_const_i32(shift);
9ee6e8bb 5534 for (pass = 0; pass < (q ? 4 : 2); pass++) {
c253dd78
PM
5535 TCGv_i32 tmpf = neon_load_reg(rm, pass);
5536 fn(tmpf, tmpf, shiftv, fpst);
5537 neon_store_reg(rd, pass, tmpf);
2c0262af 5538 }
c253dd78
PM
5539 tcg_temp_free_ptr(fpst);
5540 tcg_temp_free_i32(shiftv);
2c0262af 5541 } else {
9ee6e8bb
PB
5542 return 1;
5543 }
5544 } else { /* (insn & 0x00380080) == 0 */
246fa4ac
RH
5545 int invert, reg_ofs, vec_size;
5546
7d80fee5
PM
5547 if (q && (rd & 1)) {
5548 return 1;
5549 }
9ee6e8bb
PB
5550
5551 op = (insn >> 8) & 0xf;
5552 /* One register and immediate. */
5553 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5554 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
 5555 /* Note that op = 2,3,4,5,6,7,10,11,12,13 with imm == 0 is UNPREDICTABLE.
5556 * We choose to not special-case this and will behave as if a
5557 * valid constant encoding of 0 had been given.
5558 */
9ee6e8bb
PB
5559 switch (op) {
5560 case 0: case 1:
5561 /* no-op */
5562 break;
5563 case 2: case 3:
5564 imm <<= 8;
5565 break;
5566 case 4: case 5:
5567 imm <<= 16;
5568 break;
5569 case 6: case 7:
5570 imm <<= 24;
5571 break;
5572 case 8: case 9:
5573 imm |= imm << 16;
5574 break;
5575 case 10: case 11:
5576 imm = (imm << 8) | (imm << 24);
5577 break;
5578 case 12:
8e31209e 5579 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5580 break;
5581 case 13:
5582 imm = (imm << 16) | 0xffff;
5583 break;
5584 case 14:
5585 imm |= (imm << 8) | (imm << 16) | (imm << 24);
246fa4ac 5586 if (invert) {
9ee6e8bb 5587 imm = ~imm;
246fa4ac 5588 }
9ee6e8bb
PB
5589 break;
5590 case 15:
7d80fee5
PM
5591 if (invert) {
5592 return 1;
5593 }
9ee6e8bb
PB
5594 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5595 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5596 break;
5597 }
246fa4ac 5598 if (invert) {
9ee6e8bb 5599 imm = ~imm;
246fa4ac 5600 }
9ee6e8bb 5601
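/* Expansion example: op == 12 (cmode 1100) turns imm == 0xab into
 * 0x0000abff, the "shifted ones" form; op == 14 with invert set is
 * instead expanded byte-by-byte into a 64-bit mask further down.
 */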
246fa4ac
RH
5602 reg_ofs = neon_reg_offset(rd, 0);
5603 vec_size = q ? 16 : 8;
5604
5605 if (op & 1 && op < 12) {
5606 if (invert) {
5607 /* The immediate value has already been inverted,
5608 * so BIC becomes AND.
5609 */
5610 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
5611 vec_size, vec_size);
9ee6e8bb 5612 } else {
246fa4ac
RH
5613 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
5614 vec_size, vec_size);
5615 }
5616 } else {
5617 /* VMOV, VMVN. */
5618 if (op == 14 && invert) {
5619 TCGv_i64 t64 = tcg_temp_new_i64();
5620
5621 for (pass = 0; pass <= q; ++pass) {
5622 uint64_t val = 0;
a5a14945 5623 int n;
246fa4ac
RH
5624
5625 for (n = 0; n < 8; n++) {
5626 if (imm & (1 << (n + pass * 8))) {
5627 val |= 0xffull << (n * 8);
5628 }
9ee6e8bb 5629 }
246fa4ac
RH
5630 tcg_gen_movi_i64(t64, val);
5631 neon_store_reg64(t64, rd + pass);
9ee6e8bb 5632 }
246fa4ac
RH
5633 tcg_temp_free_i64(t64);
5634 } else {
5635 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
9ee6e8bb
PB
5636 }
5637 }
5638 }
e4b3861d 5639 } else { /* (insn & 0x00800010) == 0x00800000 */
9ee6e8bb
PB
5640 if (size != 3) {
5641 op = (insn >> 8) & 0xf;
5642 if ((insn & (1 << 6)) == 0) {
5643 /* Three registers of different lengths. */
5644 int src1_wide;
5645 int src2_wide;
5646 int prewiden;
526d0096
PM
5647 /* undefreq: bit 0 : UNDEF if size == 0
5648 * bit 1 : UNDEF if size == 1
5649 * bit 2 : UNDEF if size == 2
5650 * bit 3 : UNDEF if U == 1
5651 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
5652 */
5653 int undefreq;
5654 /* prewiden, src1_wide, src2_wide, undefreq */
5655 static const int neon_3reg_wide[16][4] = {
5656 {1, 0, 0, 0}, /* VADDL */
5657 {1, 1, 0, 0}, /* VADDW */
5658 {1, 0, 0, 0}, /* VSUBL */
5659 {1, 1, 0, 0}, /* VSUBW */
5660 {0, 1, 1, 0}, /* VADDHN */
5661 {0, 0, 0, 0}, /* VABAL */
5662 {0, 1, 1, 0}, /* VSUBHN */
5663 {0, 0, 0, 0}, /* VABDL */
5664 {0, 0, 0, 0}, /* VMLAL */
526d0096 5665 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 5666 {0, 0, 0, 0}, /* VMLSL */
526d0096 5667 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 5668 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 5669 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 5670 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 5671 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5672 };
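/* Reading the table: VQDMLAL's undefreq of 9 sets bits 0 and 3,
 * i.e. UNDEF when size == 0 or U == 1; the polynomial VMULL row's
 * 0xa means UNDEF for size == 1 or U == 1, leaving the P8 and P64
 * forms (size 0 and 2) as the valid encodings.
 */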
5673
5674 prewiden = neon_3reg_wide[op][0];
5675 src1_wide = neon_3reg_wide[op][1];
5676 src2_wide = neon_3reg_wide[op][2];
695272dc 5677 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5678
526d0096
PM
5679 if ((undefreq & (1 << size)) ||
5680 ((undefreq & 8) && u)) {
695272dc
PM
5681 return 1;
5682 }
5683 if ((src1_wide && (rn & 1)) ||
5684 (src2_wide && (rm & 1)) ||
5685 (!src2_wide && (rd & 1))) {
ad69471c 5686 return 1;
695272dc 5687 }
ad69471c 5688
4e624eda
PM
5689 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
5690 * outside the loop below as it only performs a single pass.
5691 */
5692 if (op == 14 && size == 2) {
5693 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
5694
962fcbf2 5695 if (!dc_isar_feature(aa32_pmull, s)) {
4e624eda
PM
5696 return 1;
5697 }
5698 tcg_rn = tcg_temp_new_i64();
5699 tcg_rm = tcg_temp_new_i64();
5700 tcg_rd = tcg_temp_new_i64();
5701 neon_load_reg64(tcg_rn, rn);
5702 neon_load_reg64(tcg_rm, rm);
5703 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
5704 neon_store_reg64(tcg_rd, rd);
5705 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
5706 neon_store_reg64(tcg_rd, rd + 1);
5707 tcg_temp_free_i64(tcg_rn);
5708 tcg_temp_free_i64(tcg_rm);
5709 tcg_temp_free_i64(tcg_rd);
5710 return 0;
5711 }
5712
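/* The 64x64->128 carry-less multiply fills an entire Q register,
 * which is why the P64 case above stores two 64-bit halves and
 * returns without entering the two-pass loop below.
 */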
9ee6e8bb
PB
5713 /* Avoid overlapping operands. Wide source operands are
5714 always aligned so will never overlap with wide
5715 destinations in problematic ways. */
8f8e3aa4 5716 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5717 tmp = neon_load_reg(rm, 1);
5718 neon_store_scratch(2, tmp);
8f8e3aa4 5719 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5720 tmp = neon_load_reg(rn, 1);
5721 neon_store_scratch(2, tmp);
9ee6e8bb 5722 }
f764718d 5723 tmp3 = NULL;
9ee6e8bb 5724 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5725 if (src1_wide) {
5726 neon_load_reg64(cpu_V0, rn + pass);
f764718d 5727 tmp = NULL;
9ee6e8bb 5728 } else {
ad69471c 5729 if (pass == 1 && rd == rn) {
dd8fbd78 5730 tmp = neon_load_scratch(2);
9ee6e8bb 5731 } else {
ad69471c
PB
5732 tmp = neon_load_reg(rn, pass);
5733 }
5734 if (prewiden) {
5735 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5736 }
5737 }
ad69471c
PB
5738 if (src2_wide) {
5739 neon_load_reg64(cpu_V1, rm + pass);
f764718d 5740 tmp2 = NULL;
9ee6e8bb 5741 } else {
ad69471c 5742 if (pass == 1 && rd == rm) {
dd8fbd78 5743 tmp2 = neon_load_scratch(2);
9ee6e8bb 5744 } else {
ad69471c
PB
5745 tmp2 = neon_load_reg(rm, pass);
5746 }
5747 if (prewiden) {
5748 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5749 }
9ee6e8bb
PB
5750 }
5751 switch (op) {
5752 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5753 gen_neon_addl(size);
9ee6e8bb 5754 break;
79b0e534 5755 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5756 gen_neon_subl(size);
9ee6e8bb
PB
5757 break;
5758 case 5: case 7: /* VABAL, VABDL */
5759 switch ((size << 1) | u) {
ad69471c
PB
5760 case 0:
5761 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5762 break;
5763 case 1:
5764 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5765 break;
5766 case 2:
5767 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5768 break;
5769 case 3:
5770 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5771 break;
5772 case 4:
5773 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5774 break;
5775 case 5:
5776 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5777 break;
9ee6e8bb
PB
5778 default: abort();
5779 }
7d1b0095
PM
5780 tcg_temp_free_i32(tmp2);
5781 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5782 break;
5783 case 8: case 9: case 10: case 11: case 12: case 13:
5784 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5785 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5786 break;
5787 case 14: /* Polynomial VMULL */
e5ca24cb 5788 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5789 tcg_temp_free_i32(tmp2);
5790 tcg_temp_free_i32(tmp);
e5ca24cb 5791 break;
695272dc
PM
5792 default: /* 15 is RESERVED: caught earlier */
5793 abort();
9ee6e8bb 5794 }
ebcd88ce
PM
5795 if (op == 13) {
5796 /* VQDMULL */
5797 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5798 neon_store_reg64(cpu_V0, rd + pass);
5799 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5800 /* Accumulate. */
ebcd88ce 5801 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5802 switch (op) {
4dc064e6
PM
5803 case 10: /* VMLSL */
5804 gen_neon_negl(cpu_V0, size);
5805 /* Fall through */
5806 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5807 gen_neon_addl(size);
9ee6e8bb
PB
5808 break;
5809 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5810 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5811 if (op == 11) {
5812 gen_neon_negl(cpu_V0, size);
5813 }
ad69471c
PB
5814 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5815 break;
9ee6e8bb
PB
5816 default:
5817 abort();
5818 }
ad69471c 5819 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5820 } else if (op == 4 || op == 6) {
5821 /* Narrowing operation. */
7d1b0095 5822 tmp = tcg_temp_new_i32();
79b0e534 5823 if (!u) {
9ee6e8bb 5824 switch (size) {
ad69471c
PB
5825 case 0:
5826 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5827 break;
5828 case 1:
5829 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5830 break;
5831 case 2:
664b7e3b 5832 tcg_gen_extrh_i64_i32(tmp, cpu_V0);
ad69471c 5833 break;
9ee6e8bb
PB
5834 default: abort();
5835 }
5836 } else {
5837 switch (size) {
ad69471c
PB
5838 case 0:
5839 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5840 break;
5841 case 1:
5842 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5843 break;
5844 case 2:
5845 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
664b7e3b 5846 tcg_gen_extrh_i64_i32(tmp, cpu_V0);
ad69471c 5847 break;
9ee6e8bb
PB
5848 default: abort();
5849 }
5850 }
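/* The rounding forms (VRADDHN/VRSUBHN) add half of the narrow
 * result's unit (1 << 31 in the 32-bit case above) before taking
 * the high half, giving round-half-up narrowing; the u8/u16
 * _round_ helpers do the equivalent internally.
 */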
ad69471c
PB
5851 if (pass == 0) {
5852 tmp3 = tmp;
5853 } else {
5854 neon_store_reg(rd, 0, tmp3);
5855 neon_store_reg(rd, 1, tmp);
5856 }
9ee6e8bb
PB
5857 } else {
5858 /* Write back the result. */
ad69471c 5859 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5860 }
5861 }
5862 } else {
3e3326df
PM
5863 /* Two registers and a scalar. NB that for ops of this form
5864 * the ARM ARM labels bit 24 as Q, but it is in our variable
5865 * 'u', not 'q'.
5866 */
5867 if (size == 0) {
5868 return 1;
5869 }
9ee6e8bb 5870 switch (op) {
9ee6e8bb 5871 case 1: /* Floating point VMLA scalar */
9ee6e8bb 5872 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5873 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5874 if (size == 1) {
5875 return 1;
5876 }
5877 /* fall through */
5878 case 0: /* Integer VMLA scalar */
5879 case 4: /* Integer VMLS scalar */
5880 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5881 case 12: /* VQDMULH scalar */
5882 case 13: /* VQRDMULH scalar */
3e3326df
PM
5883 if (u && ((rd | rn) & 1)) {
5884 return 1;
5885 }
dd8fbd78
FN
5886 tmp = neon_get_scalar(size, rm);
5887 neon_store_scratch(0, tmp);
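/* The scalar is fetched once (neon_get_scalar replicates 16-bit
 * scalars across both halves of the 32-bit temp) and parked in a
 * scratch slot, since the ops below consume tmp on every pass.
 */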
9ee6e8bb 5888 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5889 tmp = neon_load_scratch(0);
5890 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5891 if (op == 12) {
5892 if (size == 1) {
02da0b2d 5893 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5894 } else {
02da0b2d 5895 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5896 }
5897 } else if (op == 13) {
5898 if (size == 1) {
02da0b2d 5899 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5900 } else {
02da0b2d 5901 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5902 }
5903 } else if (op & 1) {
aa47cfdd
PM
5904 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5905 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5906 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5907 } else {
5908 switch (size) {
dd8fbd78
FN
5909 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5910 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5911 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5912 default: abort();
9ee6e8bb
PB
5913 }
5914 }
7d1b0095 5915 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5916 if (op < 8) {
5917 /* Accumulate. */
dd8fbd78 5918 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5919 switch (op) {
5920 case 0:
dd8fbd78 5921 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5922 break;
5923 case 1:
aa47cfdd
PM
5924 {
5925 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5926 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5927 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5928 break;
aa47cfdd 5929 }
9ee6e8bb 5930 case 4:
dd8fbd78 5931 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5932 break;
5933 case 5:
aa47cfdd
PM
5934 {
5935 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5936 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5937 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5938 break;
aa47cfdd 5939 }
9ee6e8bb
PB
5940 default:
5941 abort();
5942 }
7d1b0095 5943 tcg_temp_free_i32(tmp2);
9ee6e8bb 5944 }
dd8fbd78 5945 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5946 }
5947 break;
9ee6e8bb 5948 case 3: /* VQDMLAL scalar */
9ee6e8bb 5949 case 7: /* VQDMLSL scalar */
9ee6e8bb 5950 case 11: /* VQDMULL scalar */
3e3326df 5951 if (u == 1) {
ad69471c 5952 return 1;
3e3326df
PM
5953 }
5954 /* fall through */
 5955 case 2: /* VMLAL scalar */
5956 case 6: /* VMLSL scalar */
5957 case 10: /* VMULL scalar */
5958 if (rd & 1) {
5959 return 1;
5960 }
dd8fbd78 5961 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5962 /* We need a copy of tmp2 because gen_neon_mull
 5963 * frees it during pass 0. */
7d1b0095 5964 tmp4 = tcg_temp_new_i32();
c6067f04 5965 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5966 tmp3 = neon_load_reg(rn, 1);
ad69471c 5967
9ee6e8bb 5968 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5969 if (pass == 0) {
5970 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5971 } else {
dd8fbd78 5972 tmp = tmp3;
c6067f04 5973 tmp2 = tmp4;
9ee6e8bb 5974 }
ad69471c 5975 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5976 if (op != 11) {
5977 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5978 }
9ee6e8bb 5979 switch (op) {
4dc064e6
PM
5980 case 6:
5981 gen_neon_negl(cpu_V0, size);
5982 /* Fall through */
5983 case 2:
ad69471c 5984 gen_neon_addl(size);
9ee6e8bb
PB
5985 break;
5986 case 3: case 7:
ad69471c 5987 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5988 if (op == 7) {
5989 gen_neon_negl(cpu_V0, size);
5990 }
ad69471c 5991 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5992 break;
5993 case 10:
5994 /* no-op */
5995 break;
5996 case 11:
ad69471c 5997 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5998 break;
5999 default:
6000 abort();
6001 }
ad69471c 6002 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6003 }
61adacc8
RH
6004 break;
6005 case 14: /* VQRDMLAH scalar */
6006 case 15: /* VQRDMLSH scalar */
6007 {
6008 NeonGenThreeOpEnvFn *fn;
dd8fbd78 6009
962fcbf2 6010 if (!dc_isar_feature(aa32_rdm, s)) {
61adacc8
RH
6011 return 1;
6012 }
6013 if (u && ((rd | rn) & 1)) {
6014 return 1;
6015 }
6016 if (op == 14) {
6017 if (size == 1) {
6018 fn = gen_helper_neon_qrdmlah_s16;
6019 } else {
6020 fn = gen_helper_neon_qrdmlah_s32;
6021 }
6022 } else {
6023 if (size == 1) {
6024 fn = gen_helper_neon_qrdmlsh_s16;
6025 } else {
6026 fn = gen_helper_neon_qrdmlsh_s32;
6027 }
6028 }
dd8fbd78 6029
61adacc8
RH
6030 tmp2 = neon_get_scalar(size, rm);
6031 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6032 tmp = neon_load_reg(rn, pass);
6033 tmp3 = neon_load_reg(rd, pass);
6034 fn(tmp, cpu_env, tmp, tmp2, tmp3);
6035 tcg_temp_free_i32(tmp3);
6036 neon_store_reg(rd, pass, tmp);
6037 }
6038 tcg_temp_free_i32(tmp2);
6039 }
9ee6e8bb 6040 break;
61adacc8
RH
6041 default:
6042 g_assert_not_reached();
9ee6e8bb
PB
6043 }
6044 }
6045 } else { /* size == 3 */
6046 if (!u) {
6047 /* Extract. */
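/* VEXT is a byte-wise funnel shift: in the general q case below
 * the low part of each result is src >> (imm * 8) and the high
 * part is the next source register << (64 - imm * 8), OR-ed
 * together; imm == 0 and imm == 8 reduce to plain register moves.
 */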
9ee6e8bb 6048 imm = (insn >> 8) & 0xf;
ad69471c
PB
6049
6050 if (imm > 7 && !q)
6051 return 1;
6052
52579ea1
PM
6053 if (q && ((rd | rn | rm) & 1)) {
6054 return 1;
6055 }
6056
ad69471c
PB
6057 if (imm == 0) {
6058 neon_load_reg64(cpu_V0, rn);
6059 if (q) {
6060 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6061 }
ad69471c
PB
6062 } else if (imm == 8) {
6063 neon_load_reg64(cpu_V0, rn + 1);
6064 if (q) {
6065 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6066 }
ad69471c 6067 } else if (q) {
a7812ae4 6068 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6069 if (imm < 8) {
6070 neon_load_reg64(cpu_V0, rn);
a7812ae4 6071 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6072 } else {
6073 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6074 neon_load_reg64(tmp64, rm);
ad69471c
PB
6075 }
6076 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6077 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6078 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6079 if (imm < 8) {
6080 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6081 } else {
ad69471c
PB
6082 neon_load_reg64(cpu_V1, rm + 1);
6083 imm -= 8;
9ee6e8bb 6084 }
ad69471c 6085 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6086 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6087 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6088 tcg_temp_free_i64(tmp64);
ad69471c 6089 } else {
a7812ae4 6090 /* BUGFIX */
ad69471c 6091 neon_load_reg64(cpu_V0, rn);
a7812ae4 6092 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6093 neon_load_reg64(cpu_V1, rm);
a7812ae4 6094 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6095 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6096 }
6097 neon_store_reg64(cpu_V0, rd);
6098 if (q) {
6099 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6100 }
6101 } else if ((insn & (1 << 11)) == 0) {
6102 /* Two register misc. */
6103 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6104 size = (insn >> 18) & 3;
600b828c
PM
6105 /* UNDEF for unknown op values and bad op-size combinations */
6106 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6107 return 1;
6108 }
fe8fcf3d
PM
6109 if (neon_2rm_is_v8_op(op) &&
6110 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6111 return 1;
6112 }
fc2a9b37
PM
6113 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6114 q && ((rm | rd) & 1)) {
6115 return 1;
6116 }
9ee6e8bb 6117 switch (op) {
600b828c 6118 case NEON_2RM_VREV64:
9ee6e8bb 6119 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6120 tmp = neon_load_reg(rm, pass * 2);
6121 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6122 switch (size) {
dd8fbd78
FN
6123 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6124 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6125 case 2: /* no-op */ break;
6126 default: abort();
6127 }
dd8fbd78 6128 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6129 if (size == 2) {
dd8fbd78 6130 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6131 } else {
9ee6e8bb 6132 switch (size) {
dd8fbd78
FN
6133 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6134 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6135 default: abort();
6136 }
dd8fbd78 6137 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6138 }
6139 }
6140 break;
600b828c
PM
6141 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6142 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6143 for (pass = 0; pass < q + 1; pass++) {
6144 tmp = neon_load_reg(rm, pass * 2);
6145 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6146 tmp = neon_load_reg(rm, pass * 2 + 1);
6147 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6148 switch (size) {
6149 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6150 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6151 case 2: tcg_gen_add_i64(CPU_V001); break;
6152 default: abort();
6153 }
600b828c 6154 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6155 /* Accumulate. */
ad69471c
PB
6156 neon_load_reg64(cpu_V1, rd + pass);
6157 gen_neon_addl(size);
9ee6e8bb 6158 }
ad69471c 6159 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6160 }
6161 break;
600b828c 6162 case NEON_2RM_VTRN:
9ee6e8bb 6163 if (size == 2) {
a5a14945 6164 int n;
9ee6e8bb 6165 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6166 tmp = neon_load_reg(rm, n);
6167 tmp2 = neon_load_reg(rd, n + 1);
6168 neon_store_reg(rm, n, tmp2);
6169 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6170 }
6171 } else {
6172 goto elementwise;
6173 }
6174 break;
600b828c 6175 case NEON_2RM_VUZP:
02acedf9 6176 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6177 return 1;
9ee6e8bb
PB
6178 }
6179 break;
600b828c 6180 case NEON_2RM_VZIP:
d68a6f3a 6181 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6182 return 1;
9ee6e8bb
PB
6183 }
6184 break;
600b828c
PM
6185 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6186 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6187 if (rm & 1) {
6188 return 1;
6189 }
f764718d 6190 tmp2 = NULL;
9ee6e8bb 6191 for (pass = 0; pass < 2; pass++) {
ad69471c 6192 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6193 tmp = tcg_temp_new_i32();
600b828c
PM
6194 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6195 tmp, cpu_V0);
ad69471c
PB
6196 if (pass == 0) {
6197 tmp2 = tmp;
6198 } else {
6199 neon_store_reg(rd, 0, tmp2);
6200 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6201 }
9ee6e8bb
PB
6202 }
6203 break;
600b828c 6204 case NEON_2RM_VSHLL:
fc2a9b37 6205 if (q || (rd & 1)) {
9ee6e8bb 6206 return 1;
600b828c 6207 }
ad69471c
PB
6208 tmp = neon_load_reg(rm, 0);
6209 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6210 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6211 if (pass == 1)
6212 tmp = tmp2;
6213 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6214 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6215 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6216 }
6217 break;
600b828c 6218 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
6219 {
6220 TCGv_ptr fpst;
6221 TCGv_i32 ahp;
6222
602f6e42 6223 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
6224 q || (rm & 1)) {
6225 return 1;
6226 }
486624fc
AB
6227 fpst = get_fpstatus_ptr(true);
6228 ahp = get_ahp_flag();
58f2682e
PM
6229 tmp = neon_load_reg(rm, 0);
6230 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6231 tmp2 = neon_load_reg(rm, 1);
6232 gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
60011498
PB
6233 tcg_gen_shli_i32(tmp2, tmp2, 16);
6234 tcg_gen_or_i32(tmp2, tmp2, tmp);
58f2682e
PM
6235 tcg_temp_free_i32(tmp);
6236 tmp = neon_load_reg(rm, 2);
6237 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6238 tmp3 = neon_load_reg(rm, 3);
60011498 6239 neon_store_reg(rd, 0, tmp2);
58f2682e
PM
6240 gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
6241 tcg_gen_shli_i32(tmp3, tmp3, 16);
6242 tcg_gen_or_i32(tmp3, tmp3, tmp);
6243 neon_store_reg(rd, 1, tmp3);
7d1b0095 6244 tcg_temp_free_i32(tmp);
486624fc
AB
6245 tcg_temp_free_i32(ahp);
6246 tcg_temp_free_ptr(fpst);
60011498 6247 break;
486624fc 6248 }
600b828c 6249 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
6250 {
6251 TCGv_ptr fpst;
6252 TCGv_i32 ahp;
602f6e42 6253 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
6254 q || (rd & 1)) {
6255 return 1;
6256 }
486624fc
AB
6257 fpst = get_fpstatus_ptr(true);
6258 ahp = get_ahp_flag();
7d1b0095 6259 tmp3 = tcg_temp_new_i32();
60011498
PB
6260 tmp = neon_load_reg(rm, 0);
6261 tmp2 = neon_load_reg(rm, 1);
6262 tcg_gen_ext16u_i32(tmp3, tmp);
b66f6b99
PM
6263 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6264 neon_store_reg(rd, 0, tmp3);
6265 tcg_gen_shri_i32(tmp, tmp, 16);
6266 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
6267 neon_store_reg(rd, 1, tmp);
6268 tmp3 = tcg_temp_new_i32();
60011498 6269 tcg_gen_ext16u_i32(tmp3, tmp2);
b66f6b99
PM
6270 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6271 neon_store_reg(rd, 2, tmp3);
6272 tcg_gen_shri_i32(tmp2, tmp2, 16);
6273 gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
6274 neon_store_reg(rd, 3, tmp2);
486624fc
AB
6275 tcg_temp_free_i32(ahp);
6276 tcg_temp_free_ptr(fpst);
60011498 6277 break;
486624fc 6278 }
9d935509 6279 case NEON_2RM_AESE: case NEON_2RM_AESMC:
962fcbf2 6280 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
9d935509
AB
6281 return 1;
6282 }
1a66ac61
RH
6283 ptr1 = vfp_reg_ptr(true, rd);
6284 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
6285
6286 /* Bit 6 is the lowest opcode bit; it distinguishes between
6287 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6288 */
6289 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6290
6291 if (op == NEON_2RM_AESE) {
1a66ac61 6292 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 6293 } else {
1a66ac61 6294 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 6295 }
1a66ac61
RH
6296 tcg_temp_free_ptr(ptr1);
6297 tcg_temp_free_ptr(ptr2);
9d935509
AB
6298 tcg_temp_free_i32(tmp3);
6299 break;
f1ecb913 6300 case NEON_2RM_SHA1H:
962fcbf2 6301 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
f1ecb913
AB
6302 return 1;
6303 }
1a66ac61
RH
6304 ptr1 = vfp_reg_ptr(true, rd);
6305 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 6306
1a66ac61 6307 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 6308
1a66ac61
RH
6309 tcg_temp_free_ptr(ptr1);
6310 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
6311 break;
6312 case NEON_2RM_SHA1SU1:
6313 if ((rm | rd) & 1) {
6314 return 1;
6315 }
6316 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6317 if (q) {
962fcbf2 6318 if (!dc_isar_feature(aa32_sha2, s)) {
f1ecb913
AB
6319 return 1;
6320 }
962fcbf2 6321 } else if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
6322 return 1;
6323 }
1a66ac61
RH
6324 ptr1 = vfp_reg_ptr(true, rd);
6325 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 6326 if (q) {
1a66ac61 6327 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 6328 } else {
1a66ac61 6329 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 6330 }
1a66ac61
RH
6331 tcg_temp_free_ptr(ptr1);
6332 tcg_temp_free_ptr(ptr2);
f1ecb913 6333 break;
4bf940be
RH
6334
6335 case NEON_2RM_VMVN:
6336 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
6337 break;
6338 case NEON_2RM_VNEG:
6339 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
6340 break;
4e027a71
RH
6341 case NEON_2RM_VABS:
6342 tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
6343 break;
4bf940be 6344
9ee6e8bb
PB
6345 default:
6346 elementwise:
6347 for (pass = 0; pass < (q ? 4 : 2); pass++) {
60737ed5 6348 tmp = neon_load_reg(rm, pass);
9ee6e8bb 6349 switch (op) {
600b828c 6350 case NEON_2RM_VREV32:
9ee6e8bb 6351 switch (size) {
dd8fbd78
FN
6352 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6353 case 1: gen_swap_half(tmp); break;
600b828c 6354 default: abort();
9ee6e8bb
PB
6355 }
6356 break;
600b828c 6357 case NEON_2RM_VREV16:
46497f6a 6358 gen_rev16(tmp, tmp);
9ee6e8bb 6359 break;
600b828c 6360 case NEON_2RM_VCLS:
9ee6e8bb 6361 switch (size) {
dd8fbd78
FN
6362 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6363 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6364 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6365 default: abort();
9ee6e8bb
PB
6366 }
6367 break;
600b828c 6368 case NEON_2RM_VCLZ:
9ee6e8bb 6369 switch (size) {
dd8fbd78
FN
6370 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6371 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 6372 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 6373 default: abort();
9ee6e8bb
PB
6374 }
6375 break;
600b828c 6376 case NEON_2RM_VCNT:
dd8fbd78 6377 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6378 break;
600b828c 6379 case NEON_2RM_VQABS:
9ee6e8bb 6380 switch (size) {
02da0b2d
PM
6381 case 0:
6382 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6383 break;
6384 case 1:
6385 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6386 break;
6387 case 2:
6388 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6389 break;
600b828c 6390 default: abort();
9ee6e8bb
PB
6391 }
6392 break;
600b828c 6393 case NEON_2RM_VQNEG:
9ee6e8bb 6394 switch (size) {
02da0b2d
PM
6395 case 0:
6396 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6397 break;
6398 case 1:
6399 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6400 break;
6401 case 2:
6402 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6403 break;
600b828c 6404 default: abort();
9ee6e8bb
PB
6405 }
6406 break;
600b828c 6407 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6408 tmp2 = tcg_const_i32(0);
9ee6e8bb 6409 switch(size) {
dd8fbd78
FN
6410 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6411 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6412 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6413 default: abort();
9ee6e8bb 6414 }
39d5492a 6415 tcg_temp_free_i32(tmp2);
600b828c 6416 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6417 tcg_gen_not_i32(tmp, tmp);
600b828c 6418 }
9ee6e8bb 6419 break;
600b828c 6420 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6421 tmp2 = tcg_const_i32(0);
9ee6e8bb 6422 switch(size) {
dd8fbd78
FN
6423 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6424 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6425 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6426 default: abort();
9ee6e8bb 6427 }
39d5492a 6428 tcg_temp_free_i32(tmp2);
600b828c 6429 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6430 tcg_gen_not_i32(tmp, tmp);
600b828c 6431 }
9ee6e8bb 6432 break;
600b828c 6433 case NEON_2RM_VCEQ0:
dd8fbd78 6434 tmp2 = tcg_const_i32(0);
9ee6e8bb 6435 switch(size) {
dd8fbd78
FN
6436 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6437 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6438 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6439 default: abort();
9ee6e8bb 6440 }
39d5492a 6441 tcg_temp_free_i32(tmp2);
9ee6e8bb 6442 break;
600b828c 6443 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6444 {
6445 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6446 tmp2 = tcg_const_i32(0);
aa47cfdd 6447 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6448 tcg_temp_free_i32(tmp2);
aa47cfdd 6449 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6450 break;
aa47cfdd 6451 }
600b828c 6452 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6453 {
6454 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6455 tmp2 = tcg_const_i32(0);
aa47cfdd 6456 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6457 tcg_temp_free_i32(tmp2);
aa47cfdd 6458 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6459 break;
aa47cfdd 6460 }
600b828c 6461 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6462 {
6463 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6464 tmp2 = tcg_const_i32(0);
aa47cfdd 6465 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6466 tcg_temp_free_i32(tmp2);
aa47cfdd 6467 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6468 break;
aa47cfdd 6469 }
600b828c 6470 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6471 {
6472 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6473 tmp2 = tcg_const_i32(0);
aa47cfdd 6474 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6475 tcg_temp_free_i32(tmp2);
aa47cfdd 6476 tcg_temp_free_ptr(fpstatus);
0e326109 6477 break;
aa47cfdd 6478 }
600b828c 6479 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6480 {
6481 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6482 tmp2 = tcg_const_i32(0);
aa47cfdd 6483 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6484 tcg_temp_free_i32(tmp2);
aa47cfdd 6485 tcg_temp_free_ptr(fpstatus);
0e326109 6486 break;
aa47cfdd 6487 }
600b828c 6488 case NEON_2RM_VABS_F:
fd8a68cd 6489 gen_helper_vfp_abss(tmp, tmp);
9ee6e8bb 6490 break;
600b828c 6491 case NEON_2RM_VNEG_F:
cedcc96f 6492 gen_helper_vfp_negs(tmp, tmp);
9ee6e8bb 6493 break;
600b828c 6494 case NEON_2RM_VSWP:
dd8fbd78
FN
6495 tmp2 = neon_load_reg(rd, pass);
6496 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6497 break;
600b828c 6498 case NEON_2RM_VTRN:
dd8fbd78 6499 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6500 switch (size) {
dd8fbd78
FN
6501 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6502 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6503 default: abort();
9ee6e8bb 6504 }
dd8fbd78 6505 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6506 break;
34f7b0a2
WN
6507 case NEON_2RM_VRINTN:
6508 case NEON_2RM_VRINTA:
6509 case NEON_2RM_VRINTM:
6510 case NEON_2RM_VRINTP:
6511 case NEON_2RM_VRINTZ:
6512 {
6513 TCGv_i32 tcg_rmode;
6514 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6515 int rmode;
6516
6517 if (op == NEON_2RM_VRINTZ) {
6518 rmode = FPROUNDING_ZERO;
6519 } else {
6520 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6521 }
6522
6523 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6524 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6525 cpu_env);
3b52ad1f 6526 gen_helper_rints(tmp, tmp, fpstatus);
34f7b0a2
WN
6527 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6528 cpu_env);
6529 tcg_temp_free_ptr(fpstatus);
6530 tcg_temp_free_i32(tcg_rmode);
6531 break;
6532 }
2ce70625
WN
6533 case NEON_2RM_VRINTX:
6534 {
6535 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
3b52ad1f 6536 gen_helper_rints_exact(tmp, tmp, fpstatus);
2ce70625
WN
6537 tcg_temp_free_ptr(fpstatus);
6538 break;
6539 }
901ad525
WN
6540 case NEON_2RM_VCVTAU:
6541 case NEON_2RM_VCVTAS:
6542 case NEON_2RM_VCVTNU:
6543 case NEON_2RM_VCVTNS:
6544 case NEON_2RM_VCVTPU:
6545 case NEON_2RM_VCVTPS:
6546 case NEON_2RM_VCVTMU:
6547 case NEON_2RM_VCVTMS:
6548 {
6549 bool is_signed = !extract32(insn, 7, 1);
6550 TCGv_ptr fpst = get_fpstatus_ptr(1);
6551 TCGv_i32 tcg_rmode, tcg_shift;
6552 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6553
6554 tcg_shift = tcg_const_i32(0);
6555 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6556 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6557 cpu_env);
6558
6559 if (is_signed) {
30bf0a01 6560 gen_helper_vfp_tosls(tmp, tmp,
901ad525
WN
6561 tcg_shift, fpst);
6562 } else {
30bf0a01 6563 gen_helper_vfp_touls(tmp, tmp,
901ad525
WN
6564 tcg_shift, fpst);
6565 }
6566
6567 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6568 cpu_env);
6569 tcg_temp_free_i32(tcg_rmode);
6570 tcg_temp_free_i32(tcg_shift);
6571 tcg_temp_free_ptr(fpst);
6572 break;
6573 }
600b828c 6574 case NEON_2RM_VRECPE:
b6d4443a
AB
6575 {
6576 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6577 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6578 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6579 break;
b6d4443a 6580 }
600b828c 6581 case NEON_2RM_VRSQRTE:
c2fb418e
AB
6582 {
6583 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6584 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6585 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6586 break;
c2fb418e 6587 }
600b828c 6588 case NEON_2RM_VRECPE_F:
b6d4443a
AB
6589 {
6590 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9a011fec 6591 gen_helper_recpe_f32(tmp, tmp, fpstatus);
b6d4443a 6592 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6593 break;
b6d4443a 6594 }
600b828c 6595 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
6596 {
6597 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9a011fec 6598 gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
c2fb418e 6599 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6600 break;
c2fb418e 6601 }
600b828c 6602 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
60737ed5
PM
6603 {
6604 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6605 gen_helper_vfp_sitos(tmp, tmp, fpstatus);
6606 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6607 break;
60737ed5 6608 }
600b828c 6609 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
60737ed5
PM
6610 {
6611 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6612 gen_helper_vfp_uitos(tmp, tmp, fpstatus);
6613 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6614 break;
60737ed5 6615 }
600b828c 6616 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
60737ed5
PM
6617 {
6618 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6619 gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
6620 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6621 break;
60737ed5 6622 }
600b828c 6623 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
60737ed5
PM
6624 {
6625 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6626 gen_helper_vfp_touizs(tmp, tmp, fpstatus);
6627 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6628 break;
60737ed5 6629 }
9ee6e8bb 6630 default:
600b828c
PM
6631 /* Reserved op values were caught by the
6632 * neon_2rm_sizes[] check earlier.
6633 */
6634 abort();
9ee6e8bb 6635 }
60737ed5 6636 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6637 }
6638 break;
6639 }
6640 } else if ((insn & (1 << 10)) == 0) {
6641 /* VTBL, VTBX. */
56907d77
PM
6642 int n = ((insn >> 8) & 3) + 1;
6643 if ((rn + n) > 32) {
6644 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6645 * helper function running off the end of the register file.
6646 */
6647 return 1;
6648 }
6649 n <<= 3;
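/* n is now the table length in bytes (one to four D registers of
 * eight bytes each). Insn bit 6 selects VTBX, where out-of-range
 * indices keep the old destination byte (hence the loads of rd
 * below), over VTBL, where they yield zero (the movi-0 temps).
 */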
9ee6e8bb 6650 if (insn & (1 << 6)) {
8f8e3aa4 6651 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6652 } else {
7d1b0095 6653 tmp = tcg_temp_new_i32();
8f8e3aa4 6654 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6655 }
8f8e3aa4 6656 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 6657 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 6658 tmp5 = tcg_const_i32(n);
e7c06c4e 6659 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 6660 tcg_temp_free_i32(tmp);
9ee6e8bb 6661 if (insn & (1 << 6)) {
8f8e3aa4 6662 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6663 } else {
7d1b0095 6664 tmp = tcg_temp_new_i32();
8f8e3aa4 6665 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6666 }
8f8e3aa4 6667 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 6668 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 6669 tcg_temp_free_i32(tmp5);
e7c06c4e 6670 tcg_temp_free_ptr(ptr1);
8f8e3aa4 6671 neon_store_reg(rd, 0, tmp2);
3018f259 6672 neon_store_reg(rd, 1, tmp3);
7d1b0095 6673 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6674 } else if ((insn & 0x380) == 0) {
6675 /* VDUP */
32f91fb7 6676 int element;
14776ab5 6677 MemOp size;
32f91fb7 6678
133da6aa
JR
6679 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6680 return 1;
6681 }
9ee6e8bb 6682 if (insn & (1 << 16)) {
32f91fb7
RH
6683 size = MO_8;
6684 element = (insn >> 17) & 7;
9ee6e8bb 6685 } else if (insn & (1 << 17)) {
32f91fb7
RH
6686 size = MO_16;
6687 element = (insn >> 18) & 3;
6688 } else {
6689 size = MO_32;
6690 element = (insn >> 19) & 1;
9ee6e8bb 6691 }
32f91fb7
RH
6692 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
6693 neon_element_offset(rm, element, size),
6694 q ? 16 : 8, q ? 16 : 8);
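/* The lowest set bit of the imm4 field selects the element size
 * and the bits above it index the element; tcg_gen_gvec_dup_mem
 * then broadcasts that single element across the destination.
 */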
9ee6e8bb
PB
6695 } else {
6696 return 1;
6697 }
6698 }
6699 }
6700 return 0;
6701}
6702
8b7209fa
RH
6703/* Advanced SIMD three registers of the same length extension.
6704 * 31 25 23 22 20 16 12 11 10 9 8 3 0
6705 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
6706 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
6707 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
6708 */
6709static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
6710{
26c470a7
RH
6711 gen_helper_gvec_3 *fn_gvec = NULL;
6712 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
6713 int rd, rn, rm, opr_sz;
6714 int data = 0;
87732318
RH
6715 int off_rn, off_rm;
6716 bool is_long = false, q = extract32(insn, 6, 1);
6717 bool ptr_is_env = false;
8b7209fa
RH
6718
6719 if ((insn & 0xfe200f10) == 0xfc200800) {
6720 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
6721 int size = extract32(insn, 20, 1);
6722 data = extract32(insn, 23, 2); /* rot */
962fcbf2 6723 if (!dc_isar_feature(aa32_vcma, s)
5763190f 6724 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
6725 return 1;
6726 }
6727 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
6728 } else if ((insn & 0xfea00f10) == 0xfc800800) {
6729 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
6730 int size = extract32(insn, 20, 1);
6731 data = extract32(insn, 24, 1); /* rot */
962fcbf2 6732 if (!dc_isar_feature(aa32_vcma, s)
5763190f 6733 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
6734 return 1;
6735 }
6736 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
6737 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
6738 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
6739 bool u = extract32(insn, 4, 1);
962fcbf2 6740 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
6741 return 1;
6742 }
6743 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
87732318
RH
6744 } else if ((insn & 0xff300f10) == 0xfc200810) {
6745 /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
6746 int is_s = extract32(insn, 23, 1);
6747 if (!dc_isar_feature(aa32_fhm, s)) {
6748 return 1;
6749 }
6750 is_long = true;
6751 data = is_s; /* is_2 == 0 */
6752 fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
6753 ptr_is_env = true;
8b7209fa
RH
6754 } else {
6755 return 1;
6756 }
6757
87732318
RH
6758 VFP_DREG_D(rd, insn);
6759 if (rd & q) {
6760 return 1;
6761 }
6762 if (q || !is_long) {
6763 VFP_DREG_N(rn, insn);
6764 VFP_DREG_M(rm, insn);
6765 if ((rn | rm) & q & !is_long) {
6766 return 1;
6767 }
6768 off_rn = vfp_reg_offset(1, rn);
6769 off_rm = vfp_reg_offset(1, rm);
6770 } else {
6771 rn = VFP_SREG_N(insn);
6772 rm = VFP_SREG_M(insn);
6773 off_rn = vfp_reg_offset(0, rn);
6774 off_rm = vfp_reg_offset(0, rm);
6775 }
6776
8b7209fa 6777 if (s->fp_excp_el) {
a767fac8 6778 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
4be42f40 6779 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8b7209fa
RH
6780 return 0;
6781 }
6782 if (!s->vfp_enabled) {
6783 return 1;
6784 }
6785
6786 opr_sz = (1 + q) * 8;
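/* opr_sz is the vector width in bytes: 8 for a D register,
 * 16 for a Q register pair. */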
26c470a7 6787 if (fn_gvec_ptr) {
87732318
RH
6788 TCGv_ptr ptr;
6789 if (ptr_is_env) {
6790 ptr = cpu_env;
6791 } else {
6792 ptr = get_fpstatus_ptr(1);
6793 }
6794 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 6795 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
6796 if (!ptr_is_env) {
6797 tcg_temp_free_ptr(ptr);
6798 }
26c470a7 6799 } else {
87732318 6800 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
6801 opr_sz, opr_sz, data, fn_gvec);
6802 }
8b7209fa
RH
6803 return 0;
6804}
6805
638808ff
RH
6806/* Advanced SIMD two registers and a scalar extension.
6807 * 31 24 23 22 20 16 12 11 10 9 8 3 0
6808 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
6809 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
6810 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
6811 *
6812 */
6813
6814static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
6815{
26c470a7
RH
6816 gen_helper_gvec_3 *fn_gvec = NULL;
6817 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
2cc99919 6818 int rd, rn, rm, opr_sz, data;
87732318
RH
6819 int off_rn, off_rm;
6820 bool is_long = false, q = extract32(insn, 6, 1);
6821 bool ptr_is_env = false;
638808ff
RH
6822
6823 if ((insn & 0xff000f10) == 0xfe000800) {
6824 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
2cc99919
RH
6825 int rot = extract32(insn, 20, 2);
6826 int size = extract32(insn, 23, 1);
6827 int index;
6828
962fcbf2 6829 if (!dc_isar_feature(aa32_vcma, s)) {
638808ff
RH
6830 return 1;
6831 }
2cc99919 6832 if (size == 0) {
5763190f 6833 if (!dc_isar_feature(aa32_fp16_arith, s)) {
2cc99919
RH
6834 return 1;
6835 }
6836 /* For fp16, rm is just Vm, and index is M. */
6837 rm = extract32(insn, 0, 4);
6838 index = extract32(insn, 5, 1);
6839 } else {
6840 /* For fp32, rm is the usual M:Vm, and index is 0. */
6841 VFP_DREG_M(rm, insn);
6842 index = 0;
6843 }
6844 data = (index << 2) | rot;
6845 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
6846 : gen_helper_gvec_fcmlah_idx);
26c470a7
RH
6847 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
6848 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
6849 int u = extract32(insn, 4, 1);
87732318 6850
962fcbf2 6851 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
6852 return 1;
6853 }
6854 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
6855 /* rm is just Vm, and index is M. */
6856 data = extract32(insn, 5, 1); /* index */
6857 rm = extract32(insn, 0, 4);
87732318
RH
6858 } else if ((insn & 0xffa00f10) == 0xfe000810) {
6859 /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
6860 int is_s = extract32(insn, 20, 1);
6861 int vm20 = extract32(insn, 0, 3);
6862 int vm3 = extract32(insn, 3, 1);
6863 int m = extract32(insn, 5, 1);
6864 int index;
6865
6866 if (!dc_isar_feature(aa32_fhm, s)) {
6867 return 1;
6868 }
6869 if (q) {
6870 rm = vm20;
6871 index = m * 2 + vm3;
6872 } else {
6873 rm = vm20 * 2 + m;
6874 index = vm3;
6875 }
6876 is_long = true;
6877 data = (index << 2) | is_s; /* is_2 == 0 */
6878 fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
6879 ptr_is_env = true;
638808ff
RH
6880 } else {
6881 return 1;
6882 }
6883
87732318
RH
6884 VFP_DREG_D(rd, insn);
6885 if (rd & q) {
6886 return 1;
6887 }
6888 if (q || !is_long) {
6889 VFP_DREG_N(rn, insn);
6890 if (rn & q & !is_long) {
6891 return 1;
6892 }
6893 off_rn = vfp_reg_offset(1, rn);
6894 off_rm = vfp_reg_offset(1, rm);
6895 } else {
6896 rn = VFP_SREG_N(insn);
6897 off_rn = vfp_reg_offset(0, rn);
6898 off_rm = vfp_reg_offset(0, rm);
6899 }
638808ff 6900 if (s->fp_excp_el) {
a767fac8 6901 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
4be42f40 6902 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
638808ff
RH
6903 return 0;
6904 }
6905 if (!s->vfp_enabled) {
6906 return 1;
6907 }
6908
6909 opr_sz = (1 + q) * 8;
26c470a7 6910 if (fn_gvec_ptr) {
87732318
RH
6911 TCGv_ptr ptr;
6912 if (ptr_is_env) {
6913 ptr = cpu_env;
6914 } else {
6915 ptr = get_fpstatus_ptr(1);
6916 }
6917 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 6918 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
6919 if (!ptr_is_env) {
6920 tcg_temp_free_ptr(ptr);
6921 }
26c470a7 6922 } else {
87732318 6923 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
6924 opr_sz, opr_sz, data, fn_gvec);
6925 }
638808ff
RH
6926 return 0;
6927}
6928
7dcc1f89 6929static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 6930{
4b6a83fb
PM
6931 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6932 const ARMCPRegInfo *ri;
9ee6e8bb
PB
6933
6934 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
6935
6936 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 6937 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
6938 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
6939 return 1;
6940 }
d614a513 6941 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 6942 return disas_iwmmxt_insn(s, insn);
d614a513 6943 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 6944 return disas_dsp_insn(s, insn);
c0f4af17
PM
6945 }
6946 return 1;
4b6a83fb
PM
6947 }
6948
6949 /* Otherwise treat as a generic register access */
6950 is64 = (insn & (1 << 25)) == 0;
6951 if (!is64 && ((insn & (1 << 4)) == 0)) {
6952 /* cdp */
6953 return 1;
6954 }
6955
6956 crm = insn & 0xf;
6957 if (is64) {
6958 crn = 0;
6959 opc1 = (insn >> 4) & 0xf;
6960 opc2 = 0;
6961 rt2 = (insn >> 16) & 0xf;
6962 } else {
6963 crn = (insn >> 16) & 0xf;
6964 opc1 = (insn >> 21) & 7;
6965 opc2 = (insn >> 5) & 7;
6966 rt2 = 0;
6967 }
6968 isread = (insn >> 20) & 1;
6969 rt = (insn >> 12) & 0xf;
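/* Field extraction recap: the 64-bit (MCRR/MRRC) form carries
 * opc1 in bits [7:4] and a second transfer register in [19:16],
 * while the 32-bit (MCR/MRC) form instead has crn there plus the
 * 3-bit opc1/opc2 pair decoded above.
 */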
6970
60322b39 6971 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 6972 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
6973 if (ri) {
6974 /* Check access permissions */
dcbff19b 6975 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
6976 return 1;
6977 }
6978
c0f4af17 6979 if (ri->accessfn ||
d614a513 6980 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
6981 /* Emit code to perform further access permissions checks at
6982 * runtime; this may result in an exception.
c0f4af17
PM
6983 * Note that on XScale all cp0..c13 registers do an access check
6984 * call in order to handle c15_cpar.
f59df3f2
PM
6985 */
6986 TCGv_ptr tmpptr;
3f208fd7 6987 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
6988 uint32_t syndrome;
6989
6990 /* Note that since we are an implementation which takes an
6991 * exception on a trapped conditional instruction only if the
6992 * instruction passes its condition code check, we can take
6993 * advantage of the clause in the ARM ARM that allows us to set
6994 * the COND field in the instruction to 0xE in all cases.
6995 * We could fish the actual condition out of the insn (ARM)
6996 * or the condexec bits (Thumb) but it isn't necessary.
6997 */
6998 switch (cpnum) {
6999 case 14:
7000 if (is64) {
7001 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7002 isread, false);
8bcbf37c
PM
7003 } else {
7004 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7005 rt, isread, false);
8bcbf37c
PM
7006 }
7007 break;
7008 case 15:
7009 if (is64) {
7010 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7011 isread, false);
8bcbf37c
PM
7012 } else {
7013 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7014 rt, isread, false);
8bcbf37c
PM
7015 }
7016 break;
7017 default:
7018 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7019 * so this can only happen if this is an ARMv7 or earlier CPU,
7020 * in which case the syndrome information won't actually be
7021 * guest visible.
7022 */
d614a513 7023 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
7024 syndrome = syn_uncategorized();
7025 break;
7026 }
7027
43bfa4a1 7028 gen_set_condexec(s);
43722a6d 7029 gen_set_pc_im(s, s->pc_curr);
f59df3f2 7030 tmpptr = tcg_const_ptr(ri);
8bcbf37c 7031 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
7032 tcg_isread = tcg_const_i32(isread);
7033 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7034 tcg_isread);
f59df3f2 7035 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7036 tcg_temp_free_i32(tcg_syn);
3f208fd7 7037 tcg_temp_free_i32(tcg_isread);
37ff584c
PM
7038 } else if (ri->type & ARM_CP_RAISES_EXC) {
7039 /*
7040 * The readfn or writefn might raise an exception;
7041 * synchronize the CPU state in case it does.
7042 */
7043 gen_set_condexec(s);
7044 gen_set_pc_im(s, s->pc_curr);
f59df3f2
PM
7045 }
7046
4b6a83fb
PM
7047 /* Handle special cases first */
7048 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7049 case ARM_CP_NOP:
7050 return 0;
7051 case ARM_CP_WFI:
7052 if (isread) {
7053 return 1;
7054 }
a0415916 7055 gen_set_pc_im(s, s->base.pc_next);
dcba3a8d 7056 s->base.is_jmp = DISAS_WFI;
2bee5105 7057 return 0;
4b6a83fb
PM
7058 default:
7059 break;
7060 }
7061
c5a49c63 7062 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7063 gen_io_start();
7064 }
7065
4b6a83fb
PM
7066 if (isread) {
7067 /* Read */
7068 if (is64) {
7069 TCGv_i64 tmp64;
7070 TCGv_i32 tmp;
7071 if (ri->type & ARM_CP_CONST) {
7072 tmp64 = tcg_const_i64(ri->resetvalue);
7073 } else if (ri->readfn) {
7074 TCGv_ptr tmpptr;
4b6a83fb
PM
7075 tmp64 = tcg_temp_new_i64();
7076 tmpptr = tcg_const_ptr(ri);
7077 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7078 tcg_temp_free_ptr(tmpptr);
7079 } else {
7080 tmp64 = tcg_temp_new_i64();
7081 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7082 }
7083 tmp = tcg_temp_new_i32();
ecc7b3aa 7084 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb 7085 store_reg(s, rt, tmp);
ed336850 7086 tmp = tcg_temp_new_i32();
664b7e3b 7087 tcg_gen_extrh_i64_i32(tmp, tmp64);
ed336850 7088 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7089 store_reg(s, rt2, tmp);
7090 } else {
39d5492a 7091 TCGv_i32 tmp;
4b6a83fb
PM
7092 if (ri->type & ARM_CP_CONST) {
7093 tmp = tcg_const_i32(ri->resetvalue);
7094 } else if (ri->readfn) {
7095 TCGv_ptr tmpptr;
4b6a83fb
PM
7096 tmp = tcg_temp_new_i32();
7097 tmpptr = tcg_const_ptr(ri);
7098 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7099 tcg_temp_free_ptr(tmpptr);
7100 } else {
7101 tmp = load_cpu_offset(ri->fieldoffset);
7102 }
7103 if (rt == 15) {
 7104 /* A destination register of r15 for 32-bit loads sets
 7105 * the condition codes from the high 4 bits of the value.
 7106 */
7107 gen_set_nzcv(tmp);
7108 tcg_temp_free_i32(tmp);
7109 } else {
7110 store_reg(s, rt, tmp);
7111 }
7112 }
7113 } else {
7114 /* Write */
7115 if (ri->type & ARM_CP_CONST) {
7116 /* If not forbidden by access permissions, treat as WI */
7117 return 0;
7118 }
7119
7120 if (is64) {
39d5492a 7121 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7122 TCGv_i64 tmp64 = tcg_temp_new_i64();
7123 tmplo = load_reg(s, rt);
7124 tmphi = load_reg(s, rt2);
7125 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7126 tcg_temp_free_i32(tmplo);
7127 tcg_temp_free_i32(tmphi);
7128 if (ri->writefn) {
7129 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7130 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7131 tcg_temp_free_ptr(tmpptr);
7132 } else {
7133 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7134 }
7135 tcg_temp_free_i64(tmp64);
7136 } else {
7137 if (ri->writefn) {
39d5492a 7138 TCGv_i32 tmp;
4b6a83fb 7139 TCGv_ptr tmpptr;
4b6a83fb
PM
7140 tmp = load_reg(s, rt);
7141 tmpptr = tcg_const_ptr(ri);
7142 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7143 tcg_temp_free_ptr(tmpptr);
7144 tcg_temp_free_i32(tmp);
7145 } else {
39d5492a 7146 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7147 store_cpu_offset(tmp, ri->fieldoffset);
7148 }
7149 }
2452731c
PM
7150 }
7151
c5a49c63 7152 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c 7153 /* I/O operations must end the TB here (whether read or write) */
2452731c
PM
7154 gen_lookup_tb(s);
7155 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7156 /* We default to ending the TB on a coprocessor register write,
7157 * but allow this to be suppressed by the register definition
7158 * (usually only necessary to work around guest bugs).
7159 */
2452731c 7160 gen_lookup_tb(s);
4b6a83fb 7161 }
2452731c 7162
4b6a83fb
PM
7163 return 0;
7164 }
7165
626187d8
PM
7166 /* Unknown register; this might be a guest error or a QEMU
7167 * unimplemented feature.
7168 */
7169 if (is64) {
 7170 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
 7171 "64 bit system register cp:%d opc1: %d crm:%d "
 7172 "(%s)\n",
 7173 isread ? "read" : "write", cpnum, opc1, crm,
 7174 s->ns ? "non-secure" : "secure");
 7175 } else {
 7176 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
 7177 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
 7178 "(%s)\n",
 7179 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
 7180 s->ns ? "non-secure" : "secure");
 7181 }
7182
4a9a539f 7183 return 1;
9ee6e8bb
PB
7184}
7185
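/*
 * Worked example of the field extraction above (illustrative only, not
 * part of the translator): MRC p15, 0, r0, c1, c0, 0 encodes as
 * 0xee110f10, and the shifts used for the non-64-bit case recover
 * crm = 0, crn = 1, opc1 = 0, opc2 = 0, rt = 0 and isread = 1, which
 * together select the cp15 SCTLR reginfo.
 */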
5e3f878a
PB
7186
7187/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7188static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7189{
39d5492a 7190 TCGv_i32 tmp;
7d1b0095 7191 tmp = tcg_temp_new_i32();
ecc7b3aa 7192 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7193 store_reg(s, rlow, tmp);
7d1b0095 7194 tmp = tcg_temp_new_i32();
664b7e3b 7195 tcg_gen_extrh_i64_i32(tmp, val);
5e3f878a
PB
7196 store_reg(s, rhigh, tmp);
7197}
7198
 7199 /* Load and add a 64-bit value from a register pair. */
a7812ae4 7200static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7201{
a7812ae4 7202 TCGv_i64 tmp;
39d5492a
PM
7203 TCGv_i32 tmpl;
7204 TCGv_i32 tmph;
5e3f878a
PB
7205
7206 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7207 tmpl = load_reg(s, rlow);
7208 tmph = load_reg(s, rhigh);
a7812ae4 7209 tmp = tcg_temp_new_i64();
36aa55dc 7210 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7211 tcg_temp_free_i32(tmpl);
7212 tcg_temp_free_i32(tmph);
5e3f878a 7213 tcg_gen_add_i64(val, val, tmp);
b75263d6 7214 tcg_temp_free_i64(tmp);
5e3f878a
PB
7215}
7216
c9f10124 7217/* Set N and Z flags from hi|lo. */
39d5492a 7218static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7219{
c9f10124
RH
7220 tcg_gen_mov_i32(cpu_NF, hi);
7221 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7222}
7223
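/*
 * Host-side model of the flag convention used above (an illustrative
 * sketch, not translator code): NF mirrors bit 63 of hi:lo, and the ZF
 * temporary holds lo|hi, with Z considered set when that value is 0.
 */
static void model_logicq_cc(uint32_t lo, uint32_t hi, bool *n, bool *z)
{
    *n = hi >> 31;            /* sign bit of the 64-bit result */
    *z = (lo | hi) == 0;      /* all 64 bits clear */
}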
 7224 /* Load/Store exclusive instructions are implemented by remembering
 7225 the value/address loaded, and seeing if these are the same
 7226 when the store is performed. This should be sufficient to implement
 7227 the architecturally mandated semantics, and avoids having to monitor
 7228 regular stores. The compare vs the remembered value is done during
 7229 the cmpxchg operation, but we must compare the addresses manually. */
426f5abc 7230static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 7231 TCGv_i32 addr, int size)
426f5abc 7232{
94ee24e7 7233 TCGv_i32 tmp = tcg_temp_new_i32();
14776ab5 7234 MemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 7235
50225ad0
PM
7236 s->is_ldex = true;
7237
426f5abc 7238 if (size == 3) {
39d5492a 7239 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 7240 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 7241
3448d47b
PM
7242 /* For AArch32, architecturally the 32-bit word at the lowest
7243 * address is always Rt and the one at addr+4 is Rt2, even if
7244 * the CPU is big-endian. That means we don't want to do a
7245 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
7246 * for an architecturally 64-bit access, but instead do a
7247 * 64-bit access using MO_BE if appropriate and then split
7248 * the two halves.
7249 * This only makes a difference for BE32 user-mode, where
7250 * frob64() must not flip the two halves of the 64-bit data
7251 * but this code must treat BE32 user-mode like BE32 system.
7252 */
7253 TCGv taddr = gen_aa32_addr(s, addr, opc);
7254
7255 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
7256 tcg_temp_free(taddr);
354161b3 7257 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3448d47b
PM
7258 if (s->be_data == MO_BE) {
7259 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
7260 } else {
7261 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
7262 }
354161b3
EC
7263 tcg_temp_free_i64(t64);
7264
7265 store_reg(s, rt2, tmp2);
03d05e2d 7266 } else {
354161b3 7267 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 7268 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 7269 }
03d05e2d
PM
7270
7271 store_reg(s, rt, tmp);
7272 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
7273}
7274
7275static void gen_clrex(DisasContext *s)
7276{
03d05e2d 7277 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7278}
7279
426f5abc 7280static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7281 TCGv_i32 addr, int size)
426f5abc 7282{
354161b3
EC
7283 TCGv_i32 t0, t1, t2;
7284 TCGv_i64 extaddr;
7285 TCGv taddr;
42a268c2
RH
7286 TCGLabel *done_label;
7287 TCGLabel *fail_label;
14776ab5 7288 MemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
7289
7290 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7291 [addr] = {Rt};
7292 {Rd} = 0;
7293 } else {
7294 {Rd} = 1;
7295 } */
7296 fail_label = gen_new_label();
7297 done_label = gen_new_label();
03d05e2d
PM
7298 extaddr = tcg_temp_new_i64();
7299 tcg_gen_extu_i32_i64(extaddr, addr);
7300 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7301 tcg_temp_free_i64(extaddr);
7302
354161b3
EC
7303 taddr = gen_aa32_addr(s, addr, opc);
7304 t0 = tcg_temp_new_i32();
7305 t1 = load_reg(s, rt);
426f5abc 7306 if (size == 3) {
354161b3
EC
7307 TCGv_i64 o64 = tcg_temp_new_i64();
7308 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 7309
354161b3 7310 t2 = load_reg(s, rt2);
3448d47b
PM
7311 /* For AArch32, architecturally the 32-bit word at the lowest
7312 * address is always Rt and the one at addr+4 is Rt2, even if
7313 * the CPU is big-endian. Since we're going to treat this as a
7314 * single 64-bit BE store, we need to put the two halves in the
7315 * opposite order for BE to LE, so that they end up in the right
7316 * places.
7317 * We don't want gen_aa32_frob64() because that does the wrong
7318 * thing for BE32 usermode.
7319 */
7320 if (s->be_data == MO_BE) {
7321 tcg_gen_concat_i32_i64(n64, t2, t1);
7322 } else {
7323 tcg_gen_concat_i32_i64(n64, t1, t2);
7324 }
354161b3 7325 tcg_temp_free_i32(t2);
03d05e2d 7326
354161b3
EC
7327 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
7328 get_mem_index(s), opc);
7329 tcg_temp_free_i64(n64);
7330
354161b3
EC
7331 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
7332 tcg_gen_extrl_i64_i32(t0, o64);
7333
7334 tcg_temp_free_i64(o64);
7335 } else {
7336 t2 = tcg_temp_new_i32();
7337 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
7338 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
7339 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
7340 tcg_temp_free_i32(t2);
426f5abc 7341 }
354161b3
EC
7342 tcg_temp_free_i32(t1);
7343 tcg_temp_free(taddr);
7344 tcg_gen_mov_i32(cpu_R[rd], t0);
7345 tcg_temp_free_i32(t0);
426f5abc 7346 tcg_gen_br(done_label);
354161b3 7347
426f5abc
PB
7348 gen_set_label(fail_label);
7349 tcg_gen_movi_i32(cpu_R[rd], 1);
7350 gen_set_label(done_label);
03d05e2d 7351 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 7352}
426f5abc 7353
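/*
 * Conceptual model of the store-exclusive sequence above (a sketch
 * assuming C11 <stdatomic.h> and <stdint.h>, not the translator's
 * code): the value check is the compare-and-swap, while the address
 * must be compared separately, exactly as the brcond on
 * cpu_exclusive_addr does; a true result corresponds to writing 0
 * to Rd, a false one to writing 1.
 */
static bool model_store_exclusive(_Atomic uint32_t *mem, uintptr_t addr,
                                  uintptr_t excl_addr, uint32_t excl_val,
                                  uint32_t newval)
{
    uint32_t expected = excl_val;

    if (addr != excl_addr) {
        return false;                       /* branch to fail_label */
    }
    return atomic_compare_exchange_strong(mem, &expected, newval);
}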
81465888
PM
7354/* gen_srs:
7355 * @env: CPUARMState
7356 * @s: DisasContext
7357 * @mode: mode field from insn (which stack to store to)
7358 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7359 * @writeback: true if writeback bit set
7360 *
7361 * Generate code for the SRS (Store Return State) insn.
7362 */
7363static void gen_srs(DisasContext *s,
7364 uint32_t mode, uint32_t amode, bool writeback)
7365{
7366 int32_t offset;
cbc0326b
PM
7367 TCGv_i32 addr, tmp;
7368 bool undef = false;
7369
 7370 /* SRS is:
 7371 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
 7372 * and specified mode is monitor mode
 7373 * - UNDEFINED in Hyp mode
 7374 * - UNPREDICTABLE in User or System mode
 7375 * - UNPREDICTABLE if the specified mode is:
 7376 * -- not implemented
 7377 * -- not a valid mode number
 7378 * -- a mode that's at a higher exception level
 7379 * -- Monitor, if we are Non-secure
 7380 * For the UNPREDICTABLE cases we choose to UNDEF.
 7381 */
ba63cf47 7382 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
a767fac8 7383 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
cbc0326b
PM
7384 return;
7385 }
7386
7387 if (s->current_el == 0 || s->current_el == 2) {
7388 undef = true;
7389 }
7390
7391 switch (mode) {
7392 case ARM_CPU_MODE_USR:
7393 case ARM_CPU_MODE_FIQ:
7394 case ARM_CPU_MODE_IRQ:
7395 case ARM_CPU_MODE_SVC:
7396 case ARM_CPU_MODE_ABT:
7397 case ARM_CPU_MODE_UND:
7398 case ARM_CPU_MODE_SYS:
7399 break;
7400 case ARM_CPU_MODE_HYP:
7401 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7402 undef = true;
7403 }
7404 break;
7405 case ARM_CPU_MODE_MON:
7406 /* No need to check specifically for "are we non-secure" because
7407 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7408 * so if this isn't EL3 then we must be non-secure.
7409 */
7410 if (s->current_el != 3) {
7411 undef = true;
7412 }
7413 break;
7414 default:
7415 undef = true;
7416 }
7417
7418 if (undef) {
1ce21ba1 7419 unallocated_encoding(s);
cbc0326b
PM
7420 return;
7421 }
7422
7423 addr = tcg_temp_new_i32();
7424 tmp = tcg_const_i32(mode);
f01377f5
PM
7425 /* get_r13_banked() will raise an exception if called from System mode */
7426 gen_set_condexec(s);
43722a6d 7427 gen_set_pc_im(s, s->pc_curr);
81465888
PM
7428 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7429 tcg_temp_free_i32(tmp);
7430 switch (amode) {
7431 case 0: /* DA */
7432 offset = -4;
7433 break;
7434 case 1: /* IA */
7435 offset = 0;
7436 break;
7437 case 2: /* DB */
7438 offset = -8;
7439 break;
7440 case 3: /* IB */
7441 offset = 4;
7442 break;
7443 default:
7444 abort();
7445 }
7446 tcg_gen_addi_i32(addr, addr, offset);
7447 tmp = load_reg(s, 14);
12dcc321 7448 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 7449 tcg_temp_free_i32(tmp);
81465888
PM
7450 tmp = load_cpu_field(spsr);
7451 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 7452 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 7453 tcg_temp_free_i32(tmp);
81465888
PM
7454 if (writeback) {
7455 switch (amode) {
7456 case 0:
7457 offset = -8;
7458 break;
7459 case 1:
7460 offset = 4;
7461 break;
7462 case 2:
7463 offset = -4;
7464 break;
7465 case 3:
7466 offset = 0;
7467 break;
7468 default:
7469 abort();
7470 }
7471 tcg_gen_addi_i32(addr, addr, offset);
7472 tmp = tcg_const_i32(mode);
7473 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7474 tcg_temp_free_i32(tmp);
7475 }
7476 tcg_temp_free_i32(addr);
dcba3a8d 7477 s->base.is_jmp = DISAS_UPDATE;
81465888
PM
7478}
7479
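/*
 * The two switches above implement the P/U addressing modes; as a
 * table (an illustrative summary, not translator code): the first
 * column is the offset applied before the two stores, the second is
 * what gets written back to the banked SP.
 */
static const int8_t srs_first_offset[4] = { -4, 0, -8, 4 };  /* DA IA DB IB */
static const int8_t srs_wb_offset[4]    = { -8, 4, -4, 0 };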
c2d9644e
RK
7480/* Generate a label used for skipping this instruction */
7481static void arm_gen_condlabel(DisasContext *s)
7482{
7483 if (!s->condjmp) {
7484 s->condlabel = gen_new_label();
7485 s->condjmp = 1;
7486 }
7487}
7488
7489/* Skip this instruction if the ARM condition is false */
7490static void arm_skip_unless(DisasContext *s, uint32_t cond)
7491{
7492 arm_gen_condlabel(s);
7493 arm_gen_test_cc(cond ^ 1, s->condlabel);
7494}
7495
581c6ebd
RH
7496
7497/*
7498 * Constant expanders for the decoders.
7499 */
7500
145952e8
RH
7501static int negate(DisasContext *s, int x)
7502{
7503 return -x;
7504}
7505
581c6ebd
RH
7506static int times_2(DisasContext *s, int x)
7507{
7508 return x * 2;
7509}
7510
5e291fe1
RH
7511static int times_4(DisasContext *s, int x)
7512{
7513 return x * 4;
7514}
7515
581c6ebd
RH
7516/* Return only the rotation part of T32ExpandImm. */
7517static int t32_expandimm_rot(DisasContext *s, int x)
7518{
7519 return x & 0xc00 ? extract32(x, 7, 5) : 0;
7520}
7521
7522/* Return the unrotated immediate from T32ExpandImm. */
7523static int t32_expandimm_imm(DisasContext *s, int x)
7524{
7525 int imm = extract32(x, 0, 8);
7526
7527 switch (extract32(x, 8, 4)) {
7528 case 0: /* XY */
7529 /* Nothing to do. */
7530 break;
7531 case 1: /* 00XY00XY */
7532 imm *= 0x00010001;
7533 break;
7534 case 2: /* XY00XY00 */
7535 imm *= 0x01000100;
7536 break;
7537 case 3: /* XYXYXYXY */
7538 imm *= 0x01010101;
7539 break;
7540 default:
7541 /* Rotated constant. */
7542 imm |= 0x80;
7543 break;
7544 }
7545 return imm;
7546}
7547
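/*
 * Reference model of the complete T32ExpandImm transform that the two
 * expanders above split between rotation and unrotated immediate (an
 * illustrative sketch; extract32() and ror32() are the helpers from
 * qemu/bitops.h):
 */
static uint32_t t32_expand_imm_ref(uint32_t x)
{
    uint32_t rot = (x & 0xc00) ? extract32(x, 7, 5) : 0;
    uint32_t imm = extract32(x, 0, 8);

    switch (extract32(x, 8, 4)) {
    case 0:                       /* 000000XY */
        break;
    case 1:                       /* 00XY00XY */
        imm *= 0x00010001;
        break;
    case 2:                       /* XY00XY00 */
        imm *= 0x01000100;
        break;
    case 3:                       /* XYXYXYXY */
        imm *= 0x01010101;
        break;
    default:                      /* rotated constant, high bit forced */
        imm |= 0x80;
        break;
    }
    return ror32(imm, rot);       /* rot is 0 for the replicated forms */
}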
51409b9e
RH
7548/*
7549 * Include the generated decoders.
7550 */
7551
7552#include "decode-a32.inc.c"
7553#include "decode-a32-uncond.inc.c"
7554#include "decode-t32.inc.c"
7555
25ae32c5
RH
7556/* Helpers to swap operands for reverse-subtract. */
7557static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
7558{
7559 tcg_gen_sub_i32(dst, b, a);
7560}
7561
7562static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
7563{
7564 gen_sub_CC(dst, b, a);
7565}
7566
7567static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
7568{
7569 gen_sub_carry(dest, b, a);
7570}
7571
7572static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
7573{
7574 gen_sbc_CC(dest, b, a);
7575}
7576
7577/*
7578 * Helpers for the data processing routines.
7579 *
7580 * After the computation store the results back.
7581 * This may be suppressed altogether (STREG_NONE), require a runtime
7582 * check against the stack limits (STREG_SP_CHECK), or generate an
7583 * exception return. Oh, or store into a register.
7584 *
7585 * Always return true, indicating success for a trans_* function.
7586 */
7587typedef enum {
7588 STREG_NONE,
7589 STREG_NORMAL,
7590 STREG_SP_CHECK,
7591 STREG_EXC_RET,
7592} StoreRegKind;
7593
7594static bool store_reg_kind(DisasContext *s, int rd,
7595 TCGv_i32 val, StoreRegKind kind)
7596{
7597 switch (kind) {
7598 case STREG_NONE:
7599 tcg_temp_free_i32(val);
7600 return true;
7601 case STREG_NORMAL:
7602 /* See ALUWritePC: Interworking only from a32 mode. */
7603 if (s->thumb) {
7604 store_reg(s, rd, val);
7605 } else {
7606 store_reg_bx(s, rd, val);
7607 }
7608 return true;
7609 case STREG_SP_CHECK:
7610 store_sp_checked(s, val);
7611 return true;
7612 case STREG_EXC_RET:
7613 gen_exception_return(s, val);
7614 return true;
7615 }
7616 g_assert_not_reached();
7617}
7618
7619/*
7620 * Data Processing (register)
7621 *
7622 * Operate, with set flags, one register source,
7623 * one immediate shifted register source, and a destination.
7624 */
7625static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
7626 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
7627 int logic_cc, StoreRegKind kind)
7628{
7629 TCGv_i32 tmp1, tmp2;
7630
7631 tmp2 = load_reg(s, a->rm);
7632 gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
7633 tmp1 = load_reg(s, a->rn);
7634
7635 gen(tmp1, tmp1, tmp2);
7636 tcg_temp_free_i32(tmp2);
7637
7638 if (logic_cc) {
7639 gen_logic_CC(tmp1);
7640 }
7641 return store_reg_kind(s, a->rd, tmp1, kind);
7642}
7643
7644static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a,
7645 void (*gen)(TCGv_i32, TCGv_i32),
7646 int logic_cc, StoreRegKind kind)
7647{
7648 TCGv_i32 tmp;
7649
7650 tmp = load_reg(s, a->rm);
7651 gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc);
7652
7653 gen(tmp, tmp);
7654 if (logic_cc) {
7655 gen_logic_CC(tmp);
7656 }
7657 return store_reg_kind(s, a->rd, tmp, kind);
7658}
7659
5be2c123
RH
7660/*
7661 * Data-processing (register-shifted register)
7662 *
7663 * Operate, with set flags, one register source,
7664 * one register shifted register source, and a destination.
7665 */
7666static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
7667 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
7668 int logic_cc, StoreRegKind kind)
7669{
7670 TCGv_i32 tmp1, tmp2;
7671
7672 tmp1 = load_reg(s, a->rs);
7673 tmp2 = load_reg(s, a->rm);
7674 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
7675 tmp1 = load_reg(s, a->rn);
7676
7677 gen(tmp1, tmp1, tmp2);
7678 tcg_temp_free_i32(tmp2);
7679
7680 if (logic_cc) {
7681 gen_logic_CC(tmp1);
7682 }
7683 return store_reg_kind(s, a->rd, tmp1, kind);
7684}
7685
7686static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a,
7687 void (*gen)(TCGv_i32, TCGv_i32),
7688 int logic_cc, StoreRegKind kind)
7689{
7690 TCGv_i32 tmp1, tmp2;
7691
7692 tmp1 = load_reg(s, a->rs);
7693 tmp2 = load_reg(s, a->rm);
7694 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
7695
7696 gen(tmp2, tmp2);
7697 if (logic_cc) {
7698 gen_logic_CC(tmp2);
7699 }
7700 return store_reg_kind(s, a->rd, tmp2, kind);
7701}
7702
581c6ebd
RH
7703/*
7704 * Data-processing (immediate)
7705 *
7706 * Operate, with set flags, one register source,
7707 * one rotated immediate, and a destination.
7708 *
7709 * Note that logic_cc && a->rot setting CF based on the msb of the
7710 * immediate is the reason why we must pass in the unrotated form
7711 * of the immediate.
7712 */
7713static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a,
7714 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
7715 int logic_cc, StoreRegKind kind)
7716{
7717 TCGv_i32 tmp1, tmp2;
7718 uint32_t imm;
7719
7720 imm = ror32(a->imm, a->rot);
7721 if (logic_cc && a->rot) {
7722 tcg_gen_movi_i32(cpu_CF, imm >> 31);
7723 }
7724 tmp2 = tcg_const_i32(imm);
7725 tmp1 = load_reg(s, a->rn);
7726
7727 gen(tmp1, tmp1, tmp2);
7728 tcg_temp_free_i32(tmp2);
7729
7730 if (logic_cc) {
7731 gen_logic_CC(tmp1);
7732 }
7733 return store_reg_kind(s, a->rd, tmp1, kind);
7734}
7735
7736static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a,
7737 void (*gen)(TCGv_i32, TCGv_i32),
7738 int logic_cc, StoreRegKind kind)
7739{
7740 TCGv_i32 tmp;
7741 uint32_t imm;
7742
7743 imm = ror32(a->imm, a->rot);
7744 if (logic_cc && a->rot) {
7745 tcg_gen_movi_i32(cpu_CF, imm >> 31);
7746 }
7747 tmp = tcg_const_i32(imm);
7748
7749 gen(tmp, tmp);
7750 if (logic_cc) {
7751 gen_logic_CC(tmp);
7752 }
7753 return store_reg_kind(s, a->rd, tmp, kind);
7754}
7755
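/*
 * Example of the carry-out handling above (illustrative sketch): T32
 * "ANDS r0, r0, #0x80000000" arrives as imm = 0x80 with rot = 8, and
 * ror32(0x80, 8) == 0x80000000, so CF is set from bit 31 of the
 * rotated value. This is why the decoder must hand us the unrotated
 * immediate rather than pre-expanding it.
 */
static uint32_t model_expand_imm_cf(uint32_t imm, int rot, uint32_t *cf)
{
    uint32_t val = ror32(imm, rot);

    if (rot) {
        *cf = val >> 31;          /* shifter carry-out */
    }
    return val;
}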
25ae32c5
RH
 7756#define DO_ANY3(NAME, OP, L, K) \
 7757 static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \
 7758 { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); } \
 7759 static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \
 7760 { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); } \
 7761 static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a) \
 7762 { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }
 7763
 7764#define DO_ANY2(NAME, OP, L, K) \
 7765 static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \
 7766 { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); } \
 7767 static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \
 7768 { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); } \
 7769 static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a) \
 7770 { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }
 7771
 7772#define DO_CMP2(NAME, OP, L) \
 7773 static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a) \
 7774 { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); } \
 7775 static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a) \
 7776 { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); } \
 7777 static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a) \
 7778 { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
25ae32c5
RH
7779
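/*
 * For reference, DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
 * below expands (showing only the register-shifted-immediate form) to:
 *
 * static bool trans_AND_rrri(DisasContext *s, arg_s_rrr_shi *a)
 * { StoreRegKind k = (STREG_NORMAL);
 * return op_s_rrr_shi(s, a, tcg_gen_and_i32, a->s, k); }
 */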
7780DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
7781DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
7782DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
7783DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)
7784
7785DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
7786DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
7787DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
7788DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)
7789
7790DO_CMP2(TST, tcg_gen_and_i32, true)
7791DO_CMP2(TEQ, tcg_gen_xor_i32, true)
7792DO_CMP2(CMN, gen_add_CC, false)
7793DO_CMP2(CMP, gen_sub_CC, false)
7794
7795DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
7796 a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)
7797
7798/*
7799 * Note for the computation of StoreRegKind we return out of the
7800 * middle of the functions that are expanded by DO_ANY3, and that
7801 * we modify a->s via that parameter before it is used by OP.
7802 */
7803DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false,
7804 ({
7805 StoreRegKind ret = STREG_NORMAL;
7806 if (a->rd == 15 && a->s) {
7807 /*
7808 * See ALUExceptionReturn:
7809 * In User mode, UNPREDICTABLE; we choose UNDEF.
7810 * In Hyp mode, UNDEFINED.
7811 */
7812 if (IS_USER(s) || s->current_el == 2) {
7813 unallocated_encoding(s);
7814 return true;
7815 }
7816 /* There is no writeback of nzcv to PSTATE. */
7817 a->s = 0;
7818 ret = STREG_EXC_RET;
7819 } else if (a->rd == 13 && a->rn == 13) {
7820 ret = STREG_SP_CHECK;
7821 }
7822 ret;
7823 }))
7824
7825DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
7826 ({
7827 StoreRegKind ret = STREG_NORMAL;
7828 if (a->rd == 15 && a->s) {
7829 /*
7830 * See ALUExceptionReturn:
7831 * In User mode, UNPREDICTABLE; we choose UNDEF.
7832 * In Hyp mode, UNDEFINED.
7833 */
7834 if (IS_USER(s) || s->current_el == 2) {
7835 unallocated_encoding(s);
7836 return true;
7837 }
7838 /* There is no writeback of nzcv to PSTATE. */
7839 a->s = 0;
7840 ret = STREG_EXC_RET;
7841 } else if (a->rd == 13) {
7842 ret = STREG_SP_CHECK;
7843 }
7844 ret;
7845 }))
7846
7847DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
7848
7849/*
7850 * ORN is only available with T32, so there is no register-shifted-register
7851 * form of the insn. Using the DO_ANY3 macro would create an unused function.
7852 */
7853static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
7854{
7855 return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
7856}
7857
581c6ebd
RH
7858static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)
7859{
7860 return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
7861}
7862
25ae32c5
RH
7863#undef DO_ANY3
7864#undef DO_ANY2
7865#undef DO_CMP2
7866
145952e8
RH
7867static bool trans_ADR(DisasContext *s, arg_ri *a)
7868{
7869 store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm));
7870 return true;
7871}
7872
bd92fe35
RH
7873/*
7874 * Multiply and multiply accumulate
7875 */
7876
7877static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add)
7878{
7879 TCGv_i32 t1, t2;
7880
7881 t1 = load_reg(s, a->rn);
7882 t2 = load_reg(s, a->rm);
7883 tcg_gen_mul_i32(t1, t1, t2);
7884 tcg_temp_free_i32(t2);
7885 if (add) {
7886 t2 = load_reg(s, a->ra);
7887 tcg_gen_add_i32(t1, t1, t2);
7888 tcg_temp_free_i32(t2);
7889 }
7890 if (a->s) {
7891 gen_logic_CC(t1);
7892 }
7893 store_reg(s, a->rd, t1);
7894 return true;
7895}
7896
7897static bool trans_MUL(DisasContext *s, arg_MUL *a)
7898{
7899 return op_mla(s, a, false);
7900}
7901
7902static bool trans_MLA(DisasContext *s, arg_MLA *a)
7903{
7904 return op_mla(s, a, true);
7905}
7906
7907static bool trans_MLS(DisasContext *s, arg_MLS *a)
7908{
7909 TCGv_i32 t1, t2;
7910
7911 if (!ENABLE_ARCH_6T2) {
7912 return false;
7913 }
7914 t1 = load_reg(s, a->rn);
7915 t2 = load_reg(s, a->rm);
7916 tcg_gen_mul_i32(t1, t1, t2);
7917 tcg_temp_free_i32(t2);
7918 t2 = load_reg(s, a->ra);
7919 tcg_gen_sub_i32(t1, t2, t1);
7920 tcg_temp_free_i32(t2);
7921 store_reg(s, a->rd, t1);
7922 return true;
7923}
7924
7925static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add)
7926{
7927 TCGv_i32 t0, t1, t2, t3;
7928
7929 t0 = load_reg(s, a->rm);
7930 t1 = load_reg(s, a->rn);
7931 if (uns) {
7932 tcg_gen_mulu2_i32(t0, t1, t0, t1);
7933 } else {
7934 tcg_gen_muls2_i32(t0, t1, t0, t1);
7935 }
7936 if (add) {
7937 t2 = load_reg(s, a->ra);
7938 t3 = load_reg(s, a->rd);
7939 tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3);
7940 tcg_temp_free_i32(t2);
7941 tcg_temp_free_i32(t3);
7942 }
7943 if (a->s) {
7944 gen_logicq_cc(t0, t1);
7945 }
7946 store_reg(s, a->ra, t0);
7947 store_reg(s, a->rd, t1);
7948 return true;
7949}
7950
7951static bool trans_UMULL(DisasContext *s, arg_UMULL *a)
7952{
7953 return op_mlal(s, a, true, false);
7954}
7955
7956static bool trans_SMULL(DisasContext *s, arg_SMULL *a)
7957{
7958 return op_mlal(s, a, false, false);
7959}
7960
7961static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a)
7962{
7963 return op_mlal(s, a, true, true);
7964}
7965
7966static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a)
7967{
7968 return op_mlal(s, a, false, true);
7969}
7970
7971static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a)
7972{
2409d564 7973 TCGv_i32 t0, t1, t2, zero;
bd92fe35
RH
7974
7975 if (s->thumb
7976 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
7977 : !ENABLE_ARCH_6) {
7978 return false;
7979 }
7980
7981 t0 = load_reg(s, a->rm);
7982 t1 = load_reg(s, a->rn);
2409d564
RH
7983 tcg_gen_mulu2_i32(t0, t1, t0, t1);
7984 zero = tcg_const_i32(0);
7985 t2 = load_reg(s, a->ra);
7986 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
7987 tcg_temp_free_i32(t2);
7988 t2 = load_reg(s, a->rd);
7989 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
7990 tcg_temp_free_i32(t2);
7991 tcg_temp_free_i32(zero);
7992 store_reg(s, a->ra, t0);
7993 store_reg(s, a->rd, t1);
bd92fe35
RH
7994 return true;
7995}
7996
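/*
 * UMAAL computes rd:ra = rn * rm + ra + rd, and the two add2 steps
 * above cannot overflow 64 bits: (2^32-1)^2 + 2*(2^32-1) == 2^64-1
 * exactly. Host-side model (illustrative sketch):
 */
static uint64_t model_umaal(uint32_t rn, uint32_t rm, uint32_t ra, uint32_t rd)
{
    uint64_t acc = (uint64_t)rn * rm;   /* tcg_gen_mulu2_i32 */

    acc += ra;                          /* first add2, zero high word */
    acc += rd;                          /* second add2, zero high word */
    return acc;                         /* low half -> ra, high half -> rd */
}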
6d0730a8
RH
7997/*
7998 * Saturating addition and subtraction
7999 */
8000
8001static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub)
8002{
8003 TCGv_i32 t0, t1;
8004
8005 if (s->thumb
8006 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
8007 : !ENABLE_ARCH_5TE) {
8008 return false;
8009 }
8010
8011 t0 = load_reg(s, a->rm);
8012 t1 = load_reg(s, a->rn);
8013 if (doub) {
8014 gen_helper_add_saturate(t1, cpu_env, t1, t1);
8015 }
8016 if (add) {
8017 gen_helper_add_saturate(t0, cpu_env, t0, t1);
8018 } else {
8019 gen_helper_sub_saturate(t0, cpu_env, t0, t1);
8020 }
8021 tcg_temp_free_i32(t1);
8022 store_reg(s, a->rd, t0);
8023 return true;
8024}
8025
8026#define DO_QADDSUB(NAME, ADD, DOUB) \
8027static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
8028{ \
8029 return op_qaddsub(s, a, ADD, DOUB); \
8030}
8031
8032DO_QADDSUB(QADD, true, false)
8033DO_QADDSUB(QSUB, false, false)
8034DO_QADDSUB(QDADD, true, true)
8035DO_QADDSUB(QDSUB, false, true)
8036
8037#undef DO_QADDSUB
8038
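/*
 * Sketch of the signed saturating addition performed by
 * gen_helper_add_saturate (illustrative; the real helper also sets the
 * Q flag in the CPSR). Note that for QDADD/QDSUB the helper runs
 * twice, so the doubling saturates independently of the final add or
 * subtract.
 */
static int32_t model_qadd32(int32_t a, int32_t b, bool *q)
{
    int64_t r = (int64_t)a + b;

    if (r > INT32_MAX) {
        r = INT32_MAX;
        *q = true;
    } else if (r < INT32_MIN) {
        r = INT32_MIN;
        *q = true;
    }
    return r;
}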
26c6923d
RH
8039/*
8040 * Halfword multiply and multiply accumulate
8041 */
8042
8043static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
8044 int add_long, bool nt, bool mt)
8045{
ea96b374 8046 TCGv_i32 t0, t1, tl, th;
26c6923d
RH
8047
8048 if (s->thumb
8049 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
8050 : !ENABLE_ARCH_5TE) {
8051 return false;
8052 }
8053
8054 t0 = load_reg(s, a->rn);
8055 t1 = load_reg(s, a->rm);
8056 gen_mulxy(t0, t1, nt, mt);
8057 tcg_temp_free_i32(t1);
8058
8059 switch (add_long) {
8060 case 0:
8061 store_reg(s, a->rd, t0);
8062 break;
8063 case 1:
8064 t1 = load_reg(s, a->ra);
8065 gen_helper_add_setq(t0, cpu_env, t0, t1);
8066 tcg_temp_free_i32(t1);
8067 store_reg(s, a->rd, t0);
8068 break;
8069 case 2:
ea96b374
RH
8070 tl = load_reg(s, a->ra);
8071 th = load_reg(s, a->rd);
8072 t1 = tcg_const_i32(0);
8073 tcg_gen_add2_i32(tl, th, tl, th, t0, t1);
26c6923d 8074 tcg_temp_free_i32(t0);
ea96b374
RH
8075 tcg_temp_free_i32(t1);
8076 store_reg(s, a->ra, tl);
8077 store_reg(s, a->rd, th);
26c6923d
RH
8078 break;
8079 default:
8080 g_assert_not_reached();
8081 }
8082 return true;
8083}
8084
8085#define DO_SMLAX(NAME, add, nt, mt) \
8086static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
8087{ \
8088 return op_smlaxxx(s, a, add, nt, mt); \
8089}
8090
8091DO_SMLAX(SMULBB, 0, 0, 0)
8092DO_SMLAX(SMULBT, 0, 0, 1)
8093DO_SMLAX(SMULTB, 0, 1, 0)
8094DO_SMLAX(SMULTT, 0, 1, 1)
8095
8096DO_SMLAX(SMLABB, 1, 0, 0)
8097DO_SMLAX(SMLABT, 1, 0, 1)
8098DO_SMLAX(SMLATB, 1, 1, 0)
8099DO_SMLAX(SMLATT, 1, 1, 1)
8100
8101DO_SMLAX(SMLALBB, 2, 0, 0)
8102DO_SMLAX(SMLALBT, 2, 0, 1)
8103DO_SMLAX(SMLALTB, 2, 1, 0)
8104DO_SMLAX(SMLALTT, 2, 1, 1)
8105
8106#undef DO_SMLAX
8107
8108static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
8109{
8110 TCGv_i32 t0, t1;
26c6923d
RH
8111
8112 if (!ENABLE_ARCH_5TE) {
8113 return false;
8114 }
8115
8116 t0 = load_reg(s, a->rn);
8117 t1 = load_reg(s, a->rm);
485b607d
RH
8118 /*
8119 * Since the nominal result is product<47:16>, shift the 16-bit
8120 * input up by 16 bits, so that the result is at product<63:32>.
8121 */
26c6923d 8122 if (mt) {
485b607d 8123 tcg_gen_andi_i32(t1, t1, 0xffff0000);
26c6923d 8124 } else {
485b607d 8125 tcg_gen_shli_i32(t1, t1, 16);
26c6923d 8126 }
485b607d
RH
8127 tcg_gen_muls2_i32(t0, t1, t0, t1);
8128 tcg_temp_free_i32(t0);
26c6923d
RH
8129 if (add) {
8130 t0 = load_reg(s, a->ra);
8131 gen_helper_add_setq(t1, cpu_env, t1, t0);
8132 tcg_temp_free_i32(t0);
8133 }
8134 store_reg(s, a->rd, t1);
8135 return true;
8136}
8137
8138#define DO_SMLAWX(NAME, add, mt) \
8139static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
8140{ \
8141 return op_smlawx(s, a, add, mt); \
8142}
8143
8144DO_SMLAWX(SMULWB, 0, 0)
8145DO_SMLAWX(SMULWT, 0, 1)
8146DO_SMLAWX(SMLAWB, 1, 0)
8147DO_SMLAWX(SMLAWT, 1, 1)
8148
8149#undef DO_SMLAWX
8150
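/*
 * SMULWx takes bits <47:16> of the 48-bit product of a 32-bit and a
 * 16-bit operand; pre-shifting the 16-bit input as above turns that
 * into "high half of a 32x32 signed multiply". Direct host-side form
 * (illustrative sketch, assuming arithmetic right shift):
 */
static uint32_t model_smulwb(int32_t rn, uint32_t rm)
{
    int64_t product = (int64_t)rn * (int16_t)rm;   /* 48 significant bits */

    return (uint32_t)(product >> 16);              /* product<47:16> */
}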
63130596
RH
8151/*
8152 * MSR (immediate) and hints
8153 */
8154
8155static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
8156{
8157 gen_nop_hint(s, 1);
8158 return true;
8159}
8160
8161static bool trans_WFE(DisasContext *s, arg_WFE *a)
8162{
8163 gen_nop_hint(s, 2);
8164 return true;
8165}
8166
8167static bool trans_WFI(DisasContext *s, arg_WFI *a)
8168{
8169 gen_nop_hint(s, 3);
8170 return true;
8171}
8172
8173static bool trans_NOP(DisasContext *s, arg_NOP *a)
8174{
8175 return true;
8176}
8177
8178static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a)
8179{
8180 uint32_t val = ror32(a->imm, a->rot * 2);
8181 uint32_t mask = msr_mask(s, a->mask, a->r);
8182
8183 if (gen_set_psr_im(s, mask, a->r, val)) {
8184 unallocated_encoding(s);
8185 }
8186 return true;
8187}
8188
6c35d53f
RH
8189/*
8190 * Cyclic Redundancy Check
8191 */
8192
8193static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz)
8194{
8195 TCGv_i32 t1, t2, t3;
8196
8197 if (!dc_isar_feature(aa32_crc32, s)) {
8198 return false;
8199 }
8200
8201 t1 = load_reg(s, a->rn);
8202 t2 = load_reg(s, a->rm);
8203 switch (sz) {
8204 case MO_8:
8205 gen_uxtb(t2);
8206 break;
8207 case MO_16:
8208 gen_uxth(t2);
8209 break;
8210 case MO_32:
8211 break;
8212 default:
8213 g_assert_not_reached();
8214 }
8215 t3 = tcg_const_i32(1 << sz);
8216 if (c) {
8217 gen_helper_crc32c(t1, t1, t2, t3);
8218 } else {
8219 gen_helper_crc32(t1, t1, t2, t3);
8220 }
8221 tcg_temp_free_i32(t2);
8222 tcg_temp_free_i32(t3);
8223 store_reg(s, a->rd, t1);
8224 return true;
8225}
8226
8227#define DO_CRC32(NAME, c, sz) \
8228static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
8229 { return op_crc32(s, a, c, sz); }
8230
8231DO_CRC32(CRC32B, false, MO_8)
8232DO_CRC32(CRC32H, false, MO_16)
8233DO_CRC32(CRC32W, false, MO_32)
8234DO_CRC32(CRC32CB, true, MO_8)
8235DO_CRC32(CRC32CH, true, MO_16)
8236DO_CRC32(CRC32CW, true, MO_32)
8237
8238#undef DO_CRC32
8239
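/*
 * Summary of the size handling above (illustrative): the helpers take
 * the message length in bytes as 1 << sz, so CRC32B/CRC32CB consume 1
 * byte of the zero-extended value, CRC32H/CRC32CH 2 bytes, and
 * CRC32W/CRC32CW the full 4-byte word.
 */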
d0b26644
RH
8240/*
8241 * Miscellaneous instructions
8242 */
8243
8244static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a)
8245{
8246 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8247 return false;
8248 }
8249 gen_mrs_banked(s, a->r, a->sysm, a->rd);
8250 return true;
8251}
8252
8253static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank *a)
8254{
8255 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8256 return false;
8257 }
8258 gen_msr_banked(s, a->r, a->sysm, a->rn);
8259 return true;
8260}
8261
8262static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a)
8263{
8264 TCGv_i32 tmp;
8265
8266 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8267 return false;
8268 }
8269 if (a->r) {
8270 if (IS_USER(s)) {
8271 unallocated_encoding(s);
8272 return true;
8273 }
8274 tmp = load_cpu_field(spsr);
8275 } else {
8276 tmp = tcg_temp_new_i32();
8277 gen_helper_cpsr_read(tmp, cpu_env);
8278 }
8279 store_reg(s, a->rd, tmp);
8280 return true;
8281}
8282
8283static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a)
8284{
8285 TCGv_i32 tmp;
8286 uint32_t mask = msr_mask(s, a->mask, a->r);
8287
8288 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8289 return false;
8290 }
8291 tmp = load_reg(s, a->rn);
8292 if (gen_set_psr(s, mask, a->r, tmp)) {
8293 unallocated_encoding(s);
8294 }
8295 return true;
8296}
8297
8298static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
8299{
8300 TCGv_i32 tmp;
8301
8302 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
8303 return false;
8304 }
8305 tmp = tcg_const_i32(a->sysm);
8306 gen_helper_v7m_mrs(tmp, cpu_env, tmp);
8307 store_reg(s, a->rd, tmp);
8308 return true;
8309}
8310
8311static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
8312{
8313 TCGv_i32 addr, reg;
8314
8315 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
8316 return false;
8317 }
8318 addr = tcg_const_i32((a->mask << 10) | a->sysm);
8319 reg = load_reg(s, a->rn);
8320 gen_helper_v7m_msr(cpu_env, addr, reg);
8321 tcg_temp_free_i32(addr);
8322 tcg_temp_free_i32(reg);
8323 gen_lookup_tb(s);
8324 return true;
8325}
8326
4ed95abd
RH
8327static bool trans_BX(DisasContext *s, arg_BX *a)
8328{
8329 if (!ENABLE_ARCH_4T) {
8330 return false;
8331 }
8332 gen_bx(s, load_reg(s, a->rm));
8333 return true;
8334}
8335
8336static bool trans_BXJ(DisasContext *s, arg_BXJ *a)
8337{
8338 if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) {
8339 return false;
8340 }
8341 /* Trivial implementation equivalent to bx. */
8342 gen_bx(s, load_reg(s, a->rm));
8343 return true;
8344}
8345
8346static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
8347{
8348 TCGv_i32 tmp;
8349
8350 if (!ENABLE_ARCH_5) {
8351 return false;
8352 }
8353 tmp = load_reg(s, a->rm);
8354 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
8355 gen_bx(s, tmp);
8356 return true;
8357}
8358
4c97f5b2
RH
8359static bool trans_CLZ(DisasContext *s, arg_CLZ *a)
8360{
8361 TCGv_i32 tmp;
8362
8363 if (!ENABLE_ARCH_5) {
8364 return false;
8365 }
8366 tmp = load_reg(s, a->rm);
8367 tcg_gen_clzi_i32(tmp, tmp, 32);
8368 store_reg(s, a->rd, tmp);
8369 return true;
8370}
8371
ef11bc3c
RH
8372static bool trans_ERET(DisasContext *s, arg_ERET *a)
8373{
8374 TCGv_i32 tmp;
8375
8376 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
8377 return false;
8378 }
8379 if (IS_USER(s)) {
8380 unallocated_encoding(s);
8381 return true;
8382 }
8383 if (s->current_el == 2) {
8384 /* ERET from Hyp uses ELR_Hyp, not LR */
8385 tmp = load_cpu_field(elr_el[2]);
8386 } else {
8387 tmp = load_reg(s, 14);
8388 }
8389 gen_exception_return(s, tmp);
8390 return true;
8391}
8392
2cde9ea5
RH
8393static bool trans_HLT(DisasContext *s, arg_HLT *a)
8394{
8395 gen_hlt(s, a->imm);
8396 return true;
8397}
8398
8399static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
8400{
8401 if (!ENABLE_ARCH_5) {
8402 return false;
8403 }
8404 gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
8405 return true;
8406}
8407
8408static bool trans_HVC(DisasContext *s, arg_HVC *a)
8409{
8410 if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) {
8411 return false;
8412 }
8413 if (IS_USER(s)) {
8414 unallocated_encoding(s);
8415 } else {
8416 gen_hvc(s, a->imm);
8417 }
8418 return true;
8419}
8420
8421static bool trans_SMC(DisasContext *s, arg_SMC *a)
8422{
8423 if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) {
8424 return false;
8425 }
8426 if (IS_USER(s)) {
8427 unallocated_encoding(s);
8428 } else {
8429 gen_smc(s);
8430 }
8431 return true;
8432}
8433
5e291fe1
RH
8434/*
8435 * Load/store register index
8436 */
8437
8438static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
8439{
8440 ISSInfo ret;
8441
8442 /* ISS not valid if writeback */
8443 if (p && !w) {
8444 ret = rd;
8445 } else {
8446 ret = ISSInvalid;
8447 }
8448 return ret;
8449}
8450
8451static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
8452{
8453 TCGv_i32 addr = load_reg(s, a->rn);
8454
8455 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
8456 gen_helper_v8m_stackcheck(cpu_env, addr);
8457 }
8458
8459 if (a->p) {
8460 TCGv_i32 ofs = load_reg(s, a->rm);
8461 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
8462 if (a->u) {
8463 tcg_gen_add_i32(addr, addr, ofs);
8464 } else {
8465 tcg_gen_sub_i32(addr, addr, ofs);
8466 }
8467 tcg_temp_free_i32(ofs);
8468 }
8469 return addr;
8470}
8471
8472static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
8473 TCGv_i32 addr, int address_offset)
8474{
8475 if (!a->p) {
8476 TCGv_i32 ofs = load_reg(s, a->rm);
8477 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
8478 if (a->u) {
8479 tcg_gen_add_i32(addr, addr, ofs);
8480 } else {
8481 tcg_gen_sub_i32(addr, addr, ofs);
8482 }
8483 tcg_temp_free_i32(ofs);
8484 } else if (!a->w) {
8485 tcg_temp_free_i32(addr);
8486 return;
8487 }
8488 tcg_gen_addi_i32(addr, addr, address_offset);
8489 store_reg(s, a->rn, addr);
8490}
8491
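/*
 * The p/w bits in op_addr_rr_pre/op_addr_rr_post select the three
 * classic addressing forms (illustrative summary):
 *
 * p=1 w=0 offset: address = rn +/- offset, rn unchanged
 * p=1 w=1 pre-indexed: address = rn +/- offset, written back to rn
 * p=0 post-indexed: address = rn, rn +/- offset written back
 */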
8492static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
8493 MemOp mop, int mem_idx)
8494{
8495 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
8496 TCGv_i32 addr, tmp;
8497
8498 addr = op_addr_rr_pre(s, a);
8499
8500 tmp = tcg_temp_new_i32();
8501 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
8502 disas_set_da_iss(s, mop, issinfo);
8503
8504 /*
8505 * Perform base writeback before the loaded value to
8506 * ensure correct behavior with overlapping index registers.
8507 */
8508 op_addr_rr_post(s, a, addr, 0);
8509 store_reg_from_load(s, a->rt, tmp);
8510 return true;
8511}
8512
8513static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
8514 MemOp mop, int mem_idx)
8515{
8516 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
8517 TCGv_i32 addr, tmp;
8518
8519 addr = op_addr_rr_pre(s, a);
8520
8521 tmp = load_reg(s, a->rt);
8522 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
8523 disas_set_da_iss(s, mop, issinfo);
8524 tcg_temp_free_i32(tmp);
8525
8526 op_addr_rr_post(s, a, addr, 0);
8527 return true;
8528}
8529
8530static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
8531{
8532 int mem_idx = get_mem_index(s);
8533 TCGv_i32 addr, tmp;
8534
8535 if (!ENABLE_ARCH_5TE) {
8536 return false;
8537 }
8538 if (a->rt & 1) {
8539 unallocated_encoding(s);
8540 return true;
8541 }
8542 addr = op_addr_rr_pre(s, a);
8543
8544 tmp = tcg_temp_new_i32();
8545 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8546 store_reg(s, a->rt, tmp);
8547
8548 tcg_gen_addi_i32(addr, addr, 4);
8549
8550 tmp = tcg_temp_new_i32();
8551 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8552 store_reg(s, a->rt + 1, tmp);
8553
8554 /* LDRD w/ base writeback is undefined if the registers overlap. */
8555 op_addr_rr_post(s, a, addr, -4);
8556 return true;
8557}
8558
8559static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
8560{
8561 int mem_idx = get_mem_index(s);
8562 TCGv_i32 addr, tmp;
8563
8564 if (!ENABLE_ARCH_5TE) {
8565 return false;
8566 }
8567 if (a->rt & 1) {
8568 unallocated_encoding(s);
8569 return true;
8570 }
8571 addr = op_addr_rr_pre(s, a);
8572
8573 tmp = load_reg(s, a->rt);
8574 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8575 tcg_temp_free_i32(tmp);
8576
8577 tcg_gen_addi_i32(addr, addr, 4);
8578
8579 tmp = load_reg(s, a->rt + 1);
8580 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8581 tcg_temp_free_i32(tmp);
8582
8583 op_addr_rr_post(s, a, addr, -4);
8584 return true;
8585}
8586
8587/*
8588 * Load/store immediate index
8589 */
8590
8591static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
8592{
8593 int ofs = a->imm;
8594
8595 if (!a->u) {
8596 ofs = -ofs;
8597 }
8598
8599 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
8600 /*
8601 * Stackcheck. Here we know 'addr' is the current SP;
8602 * U is set if we're moving SP up, else down. It is
8603 * UNKNOWN whether the limit check triggers when SP starts
8604 * below the limit and ends up above it; we chose to do so.
8605 */
8606 if (!a->u) {
8607 TCGv_i32 newsp = tcg_temp_new_i32();
8608 tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
8609 gen_helper_v8m_stackcheck(cpu_env, newsp);
8610 tcg_temp_free_i32(newsp);
8611 } else {
8612 gen_helper_v8m_stackcheck(cpu_env, cpu_R[13]);
8613 }
8614 }
8615
8616 return add_reg_for_lit(s, a->rn, a->p ? ofs : 0);
8617}
8618
8619static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
8620 TCGv_i32 addr, int address_offset)
8621{
8622 if (!a->p) {
8623 if (a->u) {
8624 address_offset += a->imm;
8625 } else {
8626 address_offset -= a->imm;
8627 }
8628 } else if (!a->w) {
8629 tcg_temp_free_i32(addr);
8630 return;
8631 }
8632 tcg_gen_addi_i32(addr, addr, address_offset);
8633 store_reg(s, a->rn, addr);
8634}
8635
8636static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
8637 MemOp mop, int mem_idx)
8638{
8639 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
8640 TCGv_i32 addr, tmp;
8641
8642 addr = op_addr_ri_pre(s, a);
8643
8644 tmp = tcg_temp_new_i32();
8645 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
8646 disas_set_da_iss(s, mop, issinfo);
8647
8648 /*
8649 * Perform base writeback before the loaded value to
8650 * ensure correct behavior with overlapping index registers.
8651 */
8652 op_addr_ri_post(s, a, addr, 0);
8653 store_reg_from_load(s, a->rt, tmp);
8654 return true;
8655}
8656
8657static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
8658 MemOp mop, int mem_idx)
8659{
8660 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
8661 TCGv_i32 addr, tmp;
8662
8663 addr = op_addr_ri_pre(s, a);
8664
8665 tmp = load_reg(s, a->rt);
8666 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
8667 disas_set_da_iss(s, mop, issinfo);
8668 tcg_temp_free_i32(tmp);
8669
8670 op_addr_ri_post(s, a, addr, 0);
8671 return true;
8672}
8673
8674static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
8675{
8676 int mem_idx = get_mem_index(s);
8677 TCGv_i32 addr, tmp;
8678
8679 addr = op_addr_ri_pre(s, a);
8680
8681 tmp = tcg_temp_new_i32();
8682 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8683 store_reg(s, a->rt, tmp);
8684
8685 tcg_gen_addi_i32(addr, addr, 4);
8686
8687 tmp = tcg_temp_new_i32();
8688 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8689 store_reg(s, rt2, tmp);
8690
8691 /* LDRD w/ base writeback is undefined if the registers overlap. */
8692 op_addr_ri_post(s, a, addr, -4);
8693 return true;
8694}
8695
8696static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
8697{
8698 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
8699 return false;
8700 }
8701 return op_ldrd_ri(s, a, a->rt + 1);
8702}
8703
8704static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
8705{
8706 arg_ldst_ri b = {
8707 .u = a->u, .w = a->w, .p = a->p,
8708 .rn = a->rn, .rt = a->rt, .imm = a->imm
8709 };
8710 return op_ldrd_ri(s, &b, a->rt2);
8711}
8712
8713static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
8714{
8715 int mem_idx = get_mem_index(s);
8716 TCGv_i32 addr, tmp;
8717
8718 addr = op_addr_ri_pre(s, a);
8719
8720 tmp = load_reg(s, a->rt);
8721 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8722 tcg_temp_free_i32(tmp);
8723
8724 tcg_gen_addi_i32(addr, addr, 4);
8725
8726 tmp = load_reg(s, rt2);
8727 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8728 tcg_temp_free_i32(tmp);
8729
8730 op_addr_ri_post(s, a, addr, -4);
8731 return true;
8732}
8733
8734static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
8735{
8736 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
8737 return false;
8738 }
8739 return op_strd_ri(s, a, a->rt + 1);
8740}
8741
8742static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
8743{
8744 arg_ldst_ri b = {
8745 .u = a->u, .w = a->w, .p = a->p,
8746 .rn = a->rn, .rt = a->rt, .imm = a->imm
8747 };
8748 return op_strd_ri(s, &b, a->rt2);
8749}
8750
8751#define DO_LDST(NAME, WHICH, MEMOP) \
8752static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a) \
8753{ \
8754 return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s)); \
8755} \
8756static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a) \
8757{ \
8758 return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s)); \
8759} \
8760static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a) \
8761{ \
8762 return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s)); \
8763} \
8764static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a) \
8765{ \
8766 return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s)); \
8767}
8768
8769DO_LDST(LDR, load, MO_UL)
8770DO_LDST(LDRB, load, MO_UB)
8771DO_LDST(LDRH, load, MO_UW)
8772DO_LDST(LDRSB, load, MO_SB)
8773DO_LDST(LDRSH, load, MO_SW)
8774
8775DO_LDST(STR, store, MO_UL)
8776DO_LDST(STRB, store, MO_UB)
8777DO_LDST(STRH, store, MO_UW)
8778
8779#undef DO_LDST
8780
1efdd407
RH
8781/*
8782 * Synchronization primitives
8783 */
8784
8785static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc)
8786{
8787 TCGv_i32 addr, tmp;
8788 TCGv taddr;
8789
8790 opc |= s->be_data;
8791 addr = load_reg(s, a->rn);
8792 taddr = gen_aa32_addr(s, addr, opc);
8793 tcg_temp_free_i32(addr);
8794
8795 tmp = load_reg(s, a->rt2);
8796 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc);
8797 tcg_temp_free(taddr);
8798
8799 store_reg(s, a->rt, tmp);
8800 return true;
8801}
8802
8803static bool trans_SWP(DisasContext *s, arg_SWP *a)
8804{
8805 return op_swp(s, a, MO_UL | MO_ALIGN);
8806}
8807
8808static bool trans_SWPB(DisasContext *s, arg_SWP *a)
8809{
8810 return op_swp(s, a, MO_UB);
8811}
8812
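/*
 * SWP{B} is a plain atomic exchange: Rt receives the old memory
 * contents while Rt2 is stored. Host-side model of the
 * tcg_gen_atomic_xchg_i32 above (illustrative sketch, assuming the
 * GCC/Clang __atomic builtins):
 */
static uint32_t model_swp(uint32_t *mem, uint32_t store_val)
{
    return __atomic_exchange_n(mem, store_val, __ATOMIC_SEQ_CST);
}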
8813/*
8814 * Load/Store Exclusive and Load-Acquire/Store-Release
8815 */
8816
8817static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
8818{
8819 TCGv_i32 addr;
8820
af288228
RH
8821 /* We UNDEF for these UNPREDICTABLE cases. */
8822 if (a->rd == 15 || a->rn == 15 || a->rt == 15
8823 || a->rd == a->rn || a->rd == a->rt
8824 || (s->thumb && (a->rd == 13 || a->rt == 13))
8825 || (mop == MO_64
8826 && (a->rt2 == 15
8827 || a->rd == a->rt2 || a->rt == a->rt2
8828 || (s->thumb && a->rt2 == 13)))) {
8829 unallocated_encoding(s);
8830 return true;
8831 }
8832
1efdd407
RH
8833 if (rel) {
8834 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8835 }
8836
8837 addr = tcg_temp_local_new_i32();
8838 load_reg_var(s, addr, a->rn);
8839 tcg_gen_addi_i32(addr, addr, a->imm);
8840
8841 gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop);
8842 tcg_temp_free_i32(addr);
8843 return true;
8844}
8845
8846static bool trans_STREX(DisasContext *s, arg_STREX *a)
8847{
8848 if (!ENABLE_ARCH_6) {
8849 return false;
8850 }
8851 return op_strex(s, a, MO_32, false);
8852}
8853
8854static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a)
8855{
8856 if (!ENABLE_ARCH_6K) {
8857 return false;
8858 }
af288228 8859 /* We UNDEF for these UNPREDICTABLE cases. */
1efdd407
RH
8860 if (a->rt & 1) {
8861 unallocated_encoding(s);
8862 return true;
8863 }
8864 a->rt2 = a->rt + 1;
8865 return op_strex(s, a, MO_64, false);
8866}
8867
8868static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a)
8869{
8870 return op_strex(s, a, MO_64, false);
8871}
8872
8873static bool trans_STREXB(DisasContext *s, arg_STREX *a)
8874{
8875 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
8876 return false;
8877 }
8878 return op_strex(s, a, MO_8, false);
8879}
8880
8881static bool trans_STREXH(DisasContext *s, arg_STREX *a)
8882{
8883 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
8884 return false;
8885 }
8886 return op_strex(s, a, MO_16, false);
8887}
8888
8889static bool trans_STLEX(DisasContext *s, arg_STREX *a)
8890{
8891 if (!ENABLE_ARCH_8) {
8892 return false;
8893 }
8894 return op_strex(s, a, MO_32, true);
8895}
8896
8897static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a)
8898{
8899 if (!ENABLE_ARCH_8) {
8900 return false;
8901 }
af288228 8902 /* We UNDEF for these UNPREDICTABLE cases. */
1efdd407
RH
8903 if (a->rt & 1) {
8904 unallocated_encoding(s);
8905 return true;
8906 }
8907 a->rt2 = a->rt + 1;
8908 return op_strex(s, a, MO_64, true);
8909}
8910
8911static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a)
8912{
8913 if (!ENABLE_ARCH_8) {
8914 return false;
8915 }
8916 return op_strex(s, a, MO_64, true);
8917}
8918
8919static bool trans_STLEXB(DisasContext *s, arg_STREX *a)
8920{
8921 if (!ENABLE_ARCH_8) {
8922 return false;
8923 }
8924 return op_strex(s, a, MO_8, true);
8925}
8926
8927static bool trans_STLEXH(DisasContext *s, arg_STREX *a)
8928{
8929 if (!ENABLE_ARCH_8) {
8930 return false;
8931 }
8932 return op_strex(s, a, MO_16, true);
8933}
8934
8935static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
8936{
8937 TCGv_i32 addr, tmp;
8938
8939 if (!ENABLE_ARCH_8) {
8940 return false;
8941 }
af288228
RH
8942 /* We UNDEF for these UNPREDICTABLE cases. */
8943 if (a->rn == 15 || a->rt == 15) {
8944 unallocated_encoding(s);
8945 return true;
8946 }
1efdd407 8947
af288228 8948 addr = load_reg(s, a->rn);
1efdd407
RH
8949 tmp = load_reg(s, a->rt);
8950 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8951 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
8952 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
8953
8954 tcg_temp_free_i32(tmp);
8955 tcg_temp_free_i32(addr);
8956 return true;
8957}
8958
8959static bool trans_STL(DisasContext *s, arg_STL *a)
8960{
8961 return op_stl(s, a, MO_UL);
8962}
8963
8964static bool trans_STLB(DisasContext *s, arg_STL *a)
8965{
8966 return op_stl(s, a, MO_UB);
8967}
8968
8969static bool trans_STLH(DisasContext *s, arg_STL *a)
8970{
8971 return op_stl(s, a, MO_UW);
8972}
8973
8974static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
8975{
8976 TCGv_i32 addr;
8977
af288228
RH
8978 /* We UNDEF for these UNPREDICTABLE cases. */
8979 if (a->rn == 15 || a->rt == 15
8980 || (s->thumb && a->rt == 13)
8981 || (mop == MO_64
8982 && (a->rt2 == 15 || a->rt == a->rt2
8983 || (s->thumb && a->rt2 == 13)))) {
8984 unallocated_encoding(s);
8985 return true;
8986 }
8987
1efdd407
RH
8988 addr = tcg_temp_local_new_i32();
8989 load_reg_var(s, addr, a->rn);
8990 tcg_gen_addi_i32(addr, addr, a->imm);
8991
8992 gen_load_exclusive(s, a->rt, a->rt2, addr, mop);
8993 tcg_temp_free_i32(addr);
8994
8995 if (acq) {
8996 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
8997 }
8998 return true;
8999}
9000
9001static bool trans_LDREX(DisasContext *s, arg_LDREX *a)
9002{
9003 if (!ENABLE_ARCH_6) {
9004 return false;
9005 }
9006 return op_ldrex(s, a, MO_32, false);
9007}
9008
9009static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a)
9010{
9011 if (!ENABLE_ARCH_6K) {
9012 return false;
9013 }
af288228 9014 /* We UNDEF for these UNPREDICTABLE cases. */
1efdd407
RH
9015 if (a->rt & 1) {
9016 unallocated_encoding(s);
9017 return true;
9018 }
9019 a->rt2 = a->rt + 1;
9020 return op_ldrex(s, a, MO_64, false);
9021}
9022
9023static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a)
9024{
9025 return op_ldrex(s, a, MO_64, false);
9026}
9027
9028static bool trans_LDREXB(DisasContext *s, arg_LDREX *a)
9029{
9030 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
9031 return false;
9032 }
9033 return op_ldrex(s, a, MO_8, false);
9034}
9035
9036static bool trans_LDREXH(DisasContext *s, arg_LDREX *a)
9037{
9038 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
9039 return false;
9040 }
9041 return op_ldrex(s, a, MO_16, false);
9042}
9043
9044static bool trans_LDAEX(DisasContext *s, arg_LDREX *a)
9045{
9046 if (!ENABLE_ARCH_8) {
9047 return false;
9048 }
9049 return op_ldrex(s, a, MO_32, true);
9050}
9051
9052static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a)
9053{
9054 if (!ENABLE_ARCH_8) {
9055 return false;
9056 }
af288228 9057 /* We UNDEF for these UNPREDICTABLE cases. */
1efdd407
RH
9058 if (a->rt & 1) {
9059 unallocated_encoding(s);
9060 return true;
9061 }
9062 a->rt2 = a->rt + 1;
9063 return op_ldrex(s, a, MO_64, true);
9064}
9065
9066static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a)
9067{
9068 if (!ENABLE_ARCH_8) {
9069 return false;
9070 }
9071 return op_ldrex(s, a, MO_64, true);
9072}
9073
9074static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a)
9075{
9076 if (!ENABLE_ARCH_8) {
9077 return false;
9078 }
9079 return op_ldrex(s, a, MO_8, true);
9080}
9081
9082static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a)
9083{
9084 if (!ENABLE_ARCH_8) {
9085 return false;
9086 }
9087 return op_ldrex(s, a, MO_16, true);
9088}
9089
9090static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
9091{
9092 TCGv_i32 addr, tmp;
9093
9094 if (!ENABLE_ARCH_8) {
9095 return false;
9096 }
af288228
RH
9097 /* We UNDEF for these UNPREDICTABLE cases. */
9098 if (a->rn == 15 || a->rt == 15) {
9099 unallocated_encoding(s);
9100 return true;
9101 }
1efdd407 9102
af288228 9103 addr = load_reg(s, a->rn);
1efdd407
RH
9104 tmp = tcg_temp_new_i32();
9105 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
9106 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
9107 tcg_temp_free_i32(addr);
9108
9109 store_reg(s, a->rt, tmp);
9110 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9111 return true;
9112}
9113
9114static bool trans_LDA(DisasContext *s, arg_LDA *a)
9115{
9116 return op_lda(s, a, MO_UL);
9117}
9118
9119static bool trans_LDAB(DisasContext *s, arg_LDA *a)
9120{
9121 return op_lda(s, a, MO_UB);
9122}
9123
9124static bool trans_LDAH(DisasContext *s, arg_LDA *a)
9125{
9126 return op_lda(s, a, MO_UW);
9127}
9128
86d21e4b
RH
9129/*
9130 * Media instructions
9131 */
9132
9133static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
9134{
9135 TCGv_i32 t1, t2;
9136
9137 if (!ENABLE_ARCH_6) {
9138 return false;
9139 }
9140
9141 t1 = load_reg(s, a->rn);
9142 t2 = load_reg(s, a->rm);
9143 gen_helper_usad8(t1, t1, t2);
9144 tcg_temp_free_i32(t2);
9145 if (a->ra != 15) {
9146 t2 = load_reg(s, a->ra);
9147 tcg_gen_add_i32(t1, t1, t2);
9148 tcg_temp_free_i32(t2);
9149 }
9150 store_reg(s, a->rd, t1);
9151 return true;
9152}
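/*
 * Editorial sketch (illustrative only, not used by the decoder):
 * gen_helper_usad8 computes the sum of absolute differences of the
 * four unsigned byte lanes, roughly:
 *
 *     uint32_t usad8(uint32_t a, uint32_t b)
 *     {
 *         uint32_t sum = 0;
 *         for (int i = 0; i < 32; i += 8) {
 *             int da = (a >> i) & 0xff, db = (b >> i) & 0xff;
 *             sum += da > db ? da - db : db - da;
 *         }
 *         return sum;
 *     }
 *
 * USADA8 (ra != 15) then accumulates that result into Ra, as above.
 */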
9153
9154static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
9155{
9156 TCGv_i32 tmp;
9157 int width = a->widthm1 + 1;
9158 int shift = a->lsb;
9159
9160 if (!ENABLE_ARCH_6T2) {
9161 return false;
9162 }
9163 if (shift + width > 32) {
9164 /* UNPREDICTABLE; we choose to UNDEF */
9165 unallocated_encoding(s);
9166 return true;
9167 }
9168
9169 tmp = load_reg(s, a->rn);
9170 if (u) {
9171 tcg_gen_extract_i32(tmp, tmp, shift, width);
9172 } else {
9173 tcg_gen_sextract_i32(tmp, tmp, shift, width);
9174 }
9175 store_reg(s, a->rd, tmp);
9176 return true;
9177}
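/*
 * Editorial sketch (illustrative only): the extract ops above
 * implement, for width in [1,32] with shift + width <= 32,
 *
 *     uint32_t ubfx(uint32_t x, int shift, int width)
 *     {
 *         return (x >> shift) & (0xffffffffu >> (32 - width));
 *     }
 *
 * tcg_gen_sextract_i32 additionally sign-extends from bit width-1,
 * so no explicit masking is needed in the decoder.
 */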
9178
9179static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
9180{
9181 return op_bfx(s, a, false);
9182}
9183
9184static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
9185{
9186 return op_bfx(s, a, true);
9187}
9188
9189static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
9190{
9191 TCGv_i32 tmp;
9192 int msb = a->msb, lsb = a->lsb;
9193 int width;
9194
9195 if (!ENABLE_ARCH_6T2) {
9196 return false;
9197 }
9198 if (msb < lsb) {
9199 /* UNPREDICTABLE; we choose to UNDEF */
9200 unallocated_encoding(s);
9201 return true;
9202 }
9203
9204 width = msb + 1 - lsb;
9205 if (a->rn == 15) {
9206 /* BFC */
9207 tmp = tcg_const_i32(0);
9208 } else {
9209 /* BFI */
9210 tmp = load_reg(s, a->rn);
9211 }
9212 if (width != 32) {
9213 TCGv_i32 tmp2 = load_reg(s, a->rd);
9214 tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width);
9215 tcg_temp_free_i32(tmp2);
9216 }
9217 store_reg(s, a->rd, tmp);
9218 return true;
9219}
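/*
 * Editorial sketch (illustrative only): tcg_gen_deposit_i32(rd, base,
 * val, lsb, width) computes
 *
 *     uint32_t mask = (uint32_t)((1ull << width) - 1) << lsb;
 *     rd = (base & ~mask) | ((val << lsb) & mask);
 *
 * With rn == 15 the inserted value is constant zero, turning BFI into
 * BFC; width == 32 degenerates to a plain move, handled above.
 */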
9220
9221static bool trans_UDF(DisasContext *s, arg_UDF *a)
9222{
9223 unallocated_encoding(s);
9224 return true;
9225}
9226
9227/*
9228 * Parallel addition and subtraction
9229 */
9230
9231static bool op_par_addsub(DisasContext *s, arg_rrr *a,
9232 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
9233{
9234 TCGv_i32 t0, t1;
9235
9236 if (s->thumb
9237 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9238 : !ENABLE_ARCH_6) {
9239 return false;
9240 }
9241
9242 t0 = load_reg(s, a->rn);
9243 t1 = load_reg(s, a->rm);
9244
9245 gen(t0, t0, t1);
9246
9247 tcg_temp_free_i32(t1);
9248 store_reg(s, a->rd, t0);
9249 return true;
9250}
9251
9252static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
9253 void (*gen)(TCGv_i32, TCGv_i32,
9254 TCGv_i32, TCGv_ptr))
9255{
9256 TCGv_i32 t0, t1;
9257 TCGv_ptr ge;
9258
9259 if (s->thumb
9260 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9261 : !ENABLE_ARCH_6) {
9262 return false;
9263 }
9264
9265 t0 = load_reg(s, a->rn);
9266 t1 = load_reg(s, a->rm);
9267
9268 ge = tcg_temp_new_ptr();
9269 tcg_gen_addi_ptr(ge, cpu_env, offsetof(CPUARMState, GE));
9270 gen(t0, t0, t1, ge);
9271
9272 tcg_temp_free_ptr(ge);
9273 tcg_temp_free_i32(t1);
9274 store_reg(s, a->rd, t0);
9275 return true;
9276}
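/*
 * Editorial note: only the plain signed/unsigned forms (not the
 * saturating or halving ones) produce the CPSR.GE bits, which is why
 * these helpers take a pointer to env->GE.  For UADD8, for example,
 * each GE bit is the carry out of its byte lane, roughly
 *
 *     GE[i] = ((a_byte[i] + b_byte[i]) >> 8) & 1;
 */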
9277
9278#define DO_PAR_ADDSUB(NAME, helper) \
9279static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
9280{ \
9281 return op_par_addsub(s, a, helper); \
9282}
9283
9284#define DO_PAR_ADDSUB_GE(NAME, helper) \
9285static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
9286{ \
9287 return op_par_addsub_ge(s, a, helper); \
9288}
9289
9290DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
9291DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
9292DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
9293DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
9294DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
9295DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)
9296
9297DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
9298DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
9299DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
9300DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
9301DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
9302DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)
9303
9304DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
9305DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
9306DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
9307DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
9308DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
9309DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)
9310
9311DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
9312DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
9313DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
9314DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
9315DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
9316DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)
9317
9318DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
9319DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
9320DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
9321DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
9322DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
9323DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)
9324
9325DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
9326DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
9327DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
9328DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
9329DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
9330DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)
9331
9332#undef DO_PAR_ADDSUB
9333#undef DO_PAR_ADDSUB_GE
9334
9335/*
9336 * Packing, unpacking, saturation, and reversal
9337 */
9338
9339static bool trans_PKH(DisasContext *s, arg_PKH *a)
9340{
9341 TCGv_i32 tn, tm;
9342 int shift = a->imm;
9343
9344 if (s->thumb
9345 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9346 : !ENABLE_ARCH_6) {
9347 return false;
9348 }
9349
9350 tn = load_reg(s, a->rn);
9351 tm = load_reg(s, a->rm);
9352 if (a->tb) {
9353 /* PKHTB */
9354 if (shift == 0) {
9355 shift = 31;
9356 }
9357 tcg_gen_sari_i32(tm, tm, shift);
9358 tcg_gen_deposit_i32(tn, tn, tm, 0, 16);
9359 } else {
9360 /* PKHBT */
9361 tcg_gen_shli_i32(tm, tm, shift);
9362 tcg_gen_deposit_i32(tn, tm, tn, 0, 16);
9363 }
9364 tcg_temp_free_i32(tm);
9365 store_reg(s, a->rd, tn);
9366 return true;
9367}
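/*
 * Editorial sketch (illustrative only) of the two packs above:
 *
 *     // PKHBT: bottom half from Rn, top half from Rm << shift
 *     res = (rn & 0xffffu) | ((rm << shift) & 0xffff0000u);
 *     // PKHTB: top half from Rn, bottom half from Rm ASR shift
 *     res = (rn & 0xffff0000u) | (((int32_t)rm >> shift) & 0xffffu);
 *
 * For PKHTB a shift field of 0 encodes ASR #32; the ASR #31 used
 * above gives the same result (both yield 0 or 0xffffffff).
 */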
9368
9369static bool op_sat(DisasContext *s, arg_sat *a,
9370 void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
9371{
9372 TCGv_i32 tmp, satimm;
9373 int shift = a->imm;
9374
9375 if (!ENABLE_ARCH_6) {
9376 return false;
9377 }
9378
9379 tmp = load_reg(s, a->rn);
9380 if (a->sh) {
9381 tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
9382 } else {
9383 tcg_gen_shli_i32(tmp, tmp, shift);
9384 }
9385
9386 satimm = tcg_const_i32(a->satimm);
9387 gen(tmp, cpu_env, tmp, satimm);
9388 tcg_temp_free_i32(satimm);
9389
9390 store_reg(s, a->rd, tmp);
9391 return true;
9392}
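/*
 * Editorial sketch (illustrative only): for a signed saturation width
 * of n bits the ssat helper behaves roughly like
 *
 *     int32_t max = (1 << (n - 1)) - 1;
 *     if (v > max)           { v = max;       env->QF = 1; }
 *     else if (v < -max - 1) { v = -max - 1;  env->QF = 1; }
 *
 * while usat clamps to [0, (1 << n) - 1]; QF is the sticky saturation
 * flag also used by the QADD/QSUB family.
 */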
9393
9394static bool trans_SSAT(DisasContext *s, arg_sat *a)
9395{
9396 return op_sat(s, a, gen_helper_ssat);
9397}
9398
9399static bool trans_USAT(DisasContext *s, arg_sat *a)
9400{
9401 return op_sat(s, a, gen_helper_usat);
9402}
9403
9404static bool trans_SSAT16(DisasContext *s, arg_sat *a)
9405{
9406 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9407 return false;
9408 }
9409 return op_sat(s, a, gen_helper_ssat16);
9410}
9411
9412static bool trans_USAT16(DisasContext *s, arg_sat *a)
9413{
9414 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9415 return false;
9416 }
9417 return op_sat(s, a, gen_helper_usat16);
9418}
9419
9420static bool op_xta(DisasContext *s, arg_rrr_rot *a,
9421 void (*gen_extract)(TCGv_i32, TCGv_i32),
9422 void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
9423{
9424 TCGv_i32 tmp;
9425
9426 if (!ENABLE_ARCH_6) {
9427 return false;
9428 }
9429
9430 tmp = load_reg(s, a->rm);
9431 /*
9432 * TODO: In many cases we could do a shift instead of a rotate.
9433 * Combined with a simple extend, that becomes an extract.
9434 */
9435 tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
9436 gen_extract(tmp, tmp);
9437
9438 if (a->rn != 15) {
9439 TCGv_i32 tmp2 = load_reg(s, a->rn);
9440 gen_add(tmp, tmp, tmp2);
9441 tcg_temp_free_i32(tmp2);
9442 }
9443 store_reg(s, a->rd, tmp);
9444 return true;
9445}
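/*
 * Editorial note on the TODO above: for rot != 0 the rotate-then-
 * extend pair is a single extract in most cases, e.g. SXTB with
 * rotate 8 is
 *
 *     (int32_t)(int8_t)(x >> 8)    /* == sextract32(x, 8, 8) */
 *
 * The exception is a 16-bit extend with rotate 24, where the halfword
 * wraps around the top of the register and a plain shift is wrong.
 */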
9446
9447static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
9448{
9449 return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
9450}
9451
9452static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
9453{
9454 return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
9455}
9456
9457static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
9458{
9459 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9460 return false;
9461 }
9462 return op_xta(s, a, gen_helper_sxtb16, gen_add16);
9463}
9464
9465static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
9466{
9467 return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
9468}
9469
9470static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
9471{
9472 return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
9473}
9474
9475static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
9476{
9477 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9478 return false;
9479 }
9480 return op_xta(s, a, gen_helper_uxtb16, gen_add16);
9481}
9482
9483static bool trans_SEL(DisasContext *s, arg_rrr *a)
9484{
9485 TCGv_i32 t1, t2, t3;
9486
9487 if (s->thumb
9488 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9489 : !ENABLE_ARCH_6) {
9490 return false;
9491 }
9492
9493 t1 = load_reg(s, a->rn);
9494 t2 = load_reg(s, a->rm);
9495 t3 = tcg_temp_new_i32();
9496 tcg_gen_ld_i32(t3, cpu_env, offsetof(CPUARMState, GE));
9497 gen_helper_sel_flags(t1, t3, t1, t2);
9498 tcg_temp_free_i32(t3);
9499 tcg_temp_free_i32(t2);
9500 store_reg(s, a->rd, t1);
9501 return true;
9502}
9503
9504static bool op_rr(DisasContext *s, arg_rr *a,
9505 void (*gen)(TCGv_i32, TCGv_i32))
9506{
9507 TCGv_i32 tmp;
9508
9509 tmp = load_reg(s, a->rm);
9510 gen(tmp, tmp);
9511 store_reg(s, a->rd, tmp);
9512 return true;
9513}
9514
9515static bool trans_REV(DisasContext *s, arg_rr *a)
9516{
9517 if (!ENABLE_ARCH_6) {
9518 return false;
9519 }
9520 return op_rr(s, a, tcg_gen_bswap32_i32);
9521}
9522
9523static bool trans_REV16(DisasContext *s, arg_rr *a)
9524{
9525 if (!ENABLE_ARCH_6) {
9526 return false;
9527 }
9528 return op_rr(s, a, gen_rev16);
9529}
9530
9531static bool trans_REVSH(DisasContext *s, arg_rr *a)
9532{
9533 if (!ENABLE_ARCH_6) {
9534 return false;
9535 }
9536 return op_rr(s, a, gen_revsh);
9537}
9538
9539static bool trans_RBIT(DisasContext *s, arg_rr *a)
9540{
9541 if (!ENABLE_ARCH_6T2) {
9542 return false;
9543 }
9544 return op_rr(s, a, gen_helper_rbit);
9545}
9546
9547/*
9548 * Signed multiply, signed and unsigned divide
9549 */
9550
9551static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
9552{
9553 TCGv_i32 t1, t2;
9554
9555 if (!ENABLE_ARCH_6) {
9556 return false;
9557 }
9558
9559 t1 = load_reg(s, a->rn);
9560 t2 = load_reg(s, a->rm);
9561 if (m_swap) {
9562 gen_swap_half(t2);
9563 }
9564 gen_smul_dual(t1, t2);
9565
9566 if (sub) {
9567 /* This subtraction cannot overflow. */
9568 tcg_gen_sub_i32(t1, t1, t2);
9569 } else {
9570 /*
9571 * This addition cannot overflow 32 bits; however it may
9572 * overflow considered as a signed operation, in which case
9573 * we must set the Q flag.
9574 */
9575 gen_helper_add_setq(t1, cpu_env, t1, t2);
9576 }
9577 tcg_temp_free_i32(t2);
9578
9579 if (a->ra != 15) {
9580 t2 = load_reg(s, a->ra);
9581 gen_helper_add_setq(t1, cpu_env, t1, t2);
9582 tcg_temp_free_i32(t2);
9583 }
9584 store_reg(s, a->rd, t1);
9585 return true;
9586}
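/*
 * Editorial note on the overflow claims above: each 16x16 signed
 * product lies in [-0x3fff8000, 0x40000000], so the sum of two
 * products lies in [-0x7fff0000, 0x80000000] and always fits in 32
 * bits as a bit pattern.  Only the single extreme case
 * 0x40000000 + 0x40000000 == 0x80000000 overflows signed 32-bit
 * arithmetic, which is why the add (but not the subtract) must go
 * through gen_helper_add_setq.
 */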
9587
9588static bool trans_SMLAD(DisasContext *s, arg_rrrr *a)
9589{
9590 return op_smlad(s, a, false, false);
9591}
9592
9593static bool trans_SMLADX(DisasContext *s, arg_rrrr *a)
9594{
9595 return op_smlad(s, a, true, false);
9596}
9597
9598static bool trans_SMLSD(DisasContext *s, arg_rrrr *a)
9599{
9600 return op_smlad(s, a, false, true);
9601}
9602
9603static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a)
9604{
9605 return op_smlad(s, a, true, true);
9606}
9607
9608static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
9609{
9610 TCGv_i32 t1, t2;
9611 TCGv_i64 l1, l2;
9612
9613 if (!ENABLE_ARCH_6) {
9614 return false;
9615 }
9616
9617 t1 = load_reg(s, a->rn);
9618 t2 = load_reg(s, a->rm);
9619 if (m_swap) {
9620 gen_swap_half(t2);
9621 }
9622 gen_smul_dual(t1, t2);
9623
9624 l1 = tcg_temp_new_i64();
9625 l2 = tcg_temp_new_i64();
9626 tcg_gen_ext_i32_i64(l1, t1);
9627 tcg_gen_ext_i32_i64(l2, t2);
9628 tcg_temp_free_i32(t1);
9629 tcg_temp_free_i32(t2);
9630
9631 if (sub) {
9632 tcg_gen_sub_i64(l1, l1, l2);
9633 } else {
9634 tcg_gen_add_i64(l1, l1, l2);
9635 }
9636 tcg_temp_free_i64(l2);
9637
9638 gen_addq(s, l1, a->ra, a->rd);
9639 gen_storeq_reg(s, a->ra, a->rd, l1);
9640 tcg_temp_free_i64(l1);
9641 return true;
9642}
9643
9644static bool trans_SMLALD(DisasContext *s, arg_rrrr *a)
9645{
9646 return op_smlald(s, a, false, false);
9647}
9648
9649static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a)
9650{
9651 return op_smlald(s, a, true, false);
9652}
9653
9654static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a)
9655{
9656 return op_smlald(s, a, false, true);
9657}
9658
9659static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a)
9660{
9661 return op_smlald(s, a, true, true);
9662}
9663
9664static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
9665{
9666 TCGv_i32 t1, t2;
9667
9668 if (s->thumb
9669 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9670 : !ENABLE_ARCH_6) {
9671 return false;
9672 }
9673
9674 t1 = load_reg(s, a->rn);
9675 t2 = load_reg(s, a->rm);
9676 tcg_gen_muls2_i32(t2, t1, t1, t2);
9677
9678 if (a->ra != 15) {
9679 TCGv_i32 t3 = load_reg(s, a->ra);
9680 if (sub) {
9681 /*
9682 * For SMMLS, we need a 64-bit subtract: the borrow out of the low
9683 * word (caused by a non-zero multiplicand lowpart) must propagate
9684 * into the high word, and we need the correct low word for rounding.
9685 */
9686 TCGv_i32 zero = tcg_const_i32(0);
9687 tcg_gen_sub2_i32(t2, t1, zero, t3, t2, t1);
9688 tcg_temp_free_i32(zero);
9689 } else {
9690 tcg_gen_add_i32(t1, t1, t3);
9691 }
9692 tcg_temp_free_i32(t3);
9693 }
9694 if (round) {
9695 /*
9696 * Adding 0x80000000 to the 64-bit quantity means that we have
9697 * carry in to the high word when the low word has the msb set.
9698 */
9699 tcg_gen_shri_i32(t2, t2, 31);
9700 tcg_gen_add_i32(t1, t1, t2);
9701 }
9702 tcg_temp_free_i32(t2);
9703 store_reg(s, a->rd, t1);
9704 return true;
9705}
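/*
 * Editorial note (illustrative only): the rounding forms compute
 * (acc + product + 0x80000000) >> 32 in 64-bit arithmetic.  Since
 * only the high word is kept, that reduces to
 *
 *     hi += lo >> 31;
 *
 * i.e. the two-op sequence above: a carry into the high word occurs
 * exactly when bit 31 of the low word is set.
 */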
9706
9707static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
9708{
9709 return op_smmla(s, a, false, false);
9710}
9711
9712static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
9713{
9714 return op_smmla(s, a, true, false);
9715}
9716
9717static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
9718{
9719 return op_smmla(s, a, false, true);
9720}
9721
9722static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
9723{
9724 return op_smmla(s, a, true, true);
9725}
9726
9727static bool op_div(DisasContext *s, arg_rrr *a, bool u)
9728{
9729 TCGv_i32 t1, t2;
9730
9731 if (s->thumb
9732 ? !dc_isar_feature(thumb_div, s)
9733 : !dc_isar_feature(arm_div, s)) {
9734 return false;
9735 }
9736
9737 t1 = load_reg(s, a->rn);
9738 t2 = load_reg(s, a->rm);
9739 if (u) {
9740 gen_helper_udiv(t1, t1, t2);
9741 } else {
9742 gen_helper_sdiv(t1, t1, t2);
9743 }
9744 tcg_temp_free_i32(t2);
9745 store_reg(s, a->rd, t1);
9746 return true;
9747}
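/*
 * Editorial note (illustrative only): the helpers implement the Arm
 * semantics for the cases plain C division cannot express: division
 * by zero yields 0 (no exception at this level), and for SDIV
 * INT_MIN / -1 yields INT_MIN rather than trapping.
 */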
9748
9749static bool trans_SDIV(DisasContext *s, arg_rrr *a)
9750{
9751 return op_div(s, a, false);
9752}
9753
9754static bool trans_UDIV(DisasContext *s, arg_rrr *a)
9755{
9756 return op_div(s, a, true);
9757}
9758
9759/*
9760 * Legacy decoder.
9761 */
9762
f4df2210 9763static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 9764{
2c7c4e09 9765 unsigned int cond, val, op1, i, rn, rd;
9766 TCGv_i32 tmp;
9767 TCGv_i32 tmp2;
39d5492a 9768 TCGv_i32 addr;
9ee6e8bb 9769
9770 /* M variants do not implement ARM mode; this must raise the INVSTATE
9771 * UsageFault exception.
9772 */
b53d8923 9773 if (arm_dc_feature(s, ARM_FEATURE_M)) {
a767fac8 9774 gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
9775 default_exception_el(s));
9776 return;
b53d8923 9777 }
9ee6e8bb 9778 cond = insn >> 28;
9779
9780 if (cond == 0xf) {
9781 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
9782 * choose to UNDEF. In ARMv5 and above the space is used
9783 * for miscellaneous unconditional instructions.
9784 */
9785 ARCH(5);
9786
9ee6e8bb 9787 /* Unconditional instructions. */
9788 if (disas_a32_uncond(s, insn)) {
9789 return;
9790 }
9791 /* fall back to legacy decoder */
9792
9793 if (((insn >> 25) & 7) == 1) {
9794 /* NEON Data processing. */
d614a513 9795 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 9796 goto illegal_op;
d614a513 9797 }
9ee6e8bb 9798
7dcc1f89 9799 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 9800 goto illegal_op;
7dcc1f89 9801 }
9802 return;
9803 }
9804 if ((insn & 0x0f100000) == 0x04000000) {
9805 /* NEON load/store. */
d614a513 9806 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 9807 goto illegal_op;
d614a513 9808 }
9ee6e8bb 9809
7dcc1f89 9810 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 9811 goto illegal_op;
7dcc1f89 9812 }
9813 return;
9814 }
9815 if ((insn & 0x0f000e10) == 0x0e000a00) {
9816 /* VFP. */
7dcc1f89 9817 if (disas_vfp_insn(s, insn)) {
9818 goto illegal_op;
9819 }
9820 return;
9821 }
9822 if (((insn & 0x0f30f000) == 0x0510f000) ||
9823 ((insn & 0x0f30f010) == 0x0710f000)) {
9824 if ((insn & (1 << 22)) == 0) {
9825 /* PLDW; v7MP */
d614a513 9826 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
9827 goto illegal_op;
9828 }
9829 }
9830 /* Otherwise PLD; v5TE+ */
be5e7a76 9831 ARCH(5TE);
9832 return;
9833 }
9834 if (((insn & 0x0f70f000) == 0x0450f000) ||
9835 ((insn & 0x0f70f010) == 0x0650f000)) {
9836 ARCH(7);
9837 return; /* PLI; V7 */
9838 }
9839 if (((insn & 0x0f700000) == 0x04100000) ||
9840 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 9841 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
9842 goto illegal_op;
9843 }
9844 return; /* v7MP: Unallocated memory hint: must NOP */
9845 }
9846
9847 if ((insn & 0x0ffffdff) == 0x01010000) {
9848 ARCH(6);
9849 /* setend */
9850 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
9851 gen_helper_setend(cpu_env);
dcba3a8d 9852 s->base.is_jmp = DISAS_UPDATE;
9853 }
9854 return;
9855 } else if ((insn & 0x0fffff00) == 0x057ff000) {
9856 switch ((insn >> 4) & 0xf) {
9857 case 1: /* clrex */
9858 ARCH(6K);
426f5abc 9859 gen_clrex(s);
9ee6e8bb
PB
9860 return;
9861 case 4: /* dsb */
9862 case 5: /* dmb */
9ee6e8bb 9863 ARCH(7);
61e4c432 9864 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 9865 return;
9866 case 6: /* isb */
9867 /* We need to break the TB after this insn to execute
9868 * self-modifying code correctly and also to take
9869 * any pending interrupts immediately.
9870 */
a0415916 9871 gen_goto_tb(s, 0, s->base.pc_next);
6df99dec 9872 return;
9873 case 7: /* sb */
9874 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
9875 goto illegal_op;
9876 }
9877 /*
9878 * TODO: There is no speculation barrier opcode
9879 * for TCG; MB and end the TB instead.
9880 */
9881 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
a0415916 9882 gen_goto_tb(s, 0, s->base.pc_next);
9888bd1e 9883 return;
9884 default:
9885 goto illegal_op;
9886 }
9887 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
9888 /* srs */
9889 ARCH(6);
9890 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 9891 return;
ea825eee 9892 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 9893 /* rfe */
c67b6b71 9894 int32_t offset;
9895 if (IS_USER(s))
9896 goto illegal_op;
9897 ARCH(6);
9898 rn = (insn >> 16) & 0xf;
b0109805 9899 addr = load_reg(s, rn);
9900 i = (insn >> 23) & 3;
9901 switch (i) {
b0109805 9902 case 0: offset = -4; break; /* DA */
9903 case 1: offset = 0; break; /* IA */
9904 case 2: offset = -8; break; /* DB */
b0109805 9905 case 3: offset = 4; break; /* IB */
9906 default: abort();
9907 }
9908 if (offset)
9909 tcg_gen_addi_i32(addr, addr, offset);
9910 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 9911 tmp = tcg_temp_new_i32();
12dcc321 9912 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9913 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9914 tmp2 = tcg_temp_new_i32();
12dcc321 9915 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9916 if (insn & (1 << 21)) {
9917 /* Base writeback. */
9918 switch (i) {
b0109805 9919 case 0: offset = -8; break;
9920 case 1: offset = 4; break;
9921 case 2: offset = -4; break;
b0109805 9922 case 3: offset = 0; break;
9923 default: abort();
9924 }
9925 if (offset)
9926 tcg_gen_addi_i32(addr, addr, offset);
9927 store_reg(s, rn, addr);
9928 } else {
7d1b0095 9929 tcg_temp_free_i32(addr);
9ee6e8bb 9930 }
b0109805 9931 gen_rfe(s, tmp, tmp2);
c67b6b71 9932 return;
9933 } else if ((insn & 0x0e000000) == 0x0a000000) {
9934 /* branch link and change to thumb (blx <offset>) */
9935 int32_t offset;
9936
7d1b0095 9937 tmp = tcg_temp_new_i32();
a0415916 9938 tcg_gen_movi_i32(tmp, s->base.pc_next);
d9ba4830 9939 store_reg(s, 14, tmp);
9940 /* Sign-extend the 24-bit offset */
9941 offset = (((int32_t)insn) << 8) >> 8;
fdbcf632 9942 val = read_pc(s);
9943 /* offset * 4 + bit24 * 2 + (thumb bit) */
9944 val += (offset << 2) | ((insn >> 23) & 2) | 1;
be5e7a76 9945 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 9946 gen_bx_im(s, val);
9947 return;
9948 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 9949 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 9950 /* iWMMXt register transfer. */
c0f4af17 9951 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 9952 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 9953 return;
9954 }
9955 }
9ee6e8bb 9956 }
9957 } else if ((insn & 0x0e000a00) == 0x0c000800
9958 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9959 if (disas_neon_insn_3same_ext(s, insn)) {
9960 goto illegal_op;
9961 }
9962 return;
9963 } else if ((insn & 0x0f000a00) == 0x0e000800
9964 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9965 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
9966 goto illegal_op;
9967 }
9968 return;
9969 } else if ((insn & 0x0fe00000) == 0x0c400000) {
9970 /* Coprocessor double register transfer. */
be5e7a76 9971 ARCH(5TE);
9972 } else if ((insn & 0x0f000010) == 0x0e000010) {
9973 /* Additional coprocessor register transfer. */
7997d92f 9974 } else if ((insn & 0x0ff10020) == 0x01000000) {
9975 uint32_t mask;
9976 uint32_t val;
9977 /* cps (privileged) */
9978 if (IS_USER(s))
9979 return;
9980 mask = val = 0;
9981 if (insn & (1 << 19)) {
9982 if (insn & (1 << 8))
9983 mask |= CPSR_A;
9984 if (insn & (1 << 7))
9985 mask |= CPSR_I;
9986 if (insn & (1 << 6))
9987 mask |= CPSR_F;
9988 if (insn & (1 << 18))
9989 val |= mask;
9990 }
7997d92f 9991 if (insn & (1 << 17)) {
9992 mask |= CPSR_M;
9993 val |= (insn & 0x1f);
9994 }
9995 if (mask) {
2fbac54b 9996 gen_set_psr_im(s, mask, 0, val);
9997 }
9998 return;
9999 }
10000 goto illegal_op;
10001 }
10002 if (cond != 0xe) {
10003 /* If the condition is not "always", generate a conditional
10004 jump to the next instruction. */
c2d9644e 10005 arm_skip_unless(s, cond);
9ee6e8bb 10006 }
10007
10008 if (disas_a32(s, insn)) {
10009 return;
10010 }
10011 /* fall back to legacy decoder */
10012
10013 if ((insn & 0x0f900000) == 0x03000000) {
10014 if ((insn & (1 << 21)) == 0) {
10015 ARCH(6T2);
10016 rd = (insn >> 12) & 0xf;
10017 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
10018 if ((insn & (1 << 22)) == 0) {
10019 /* MOVW */
7d1b0095 10020 tmp = tcg_temp_new_i32();
5e3f878a 10021 tcg_gen_movi_i32(tmp, val);
10022 } else {
10023 /* MOVT */
5e3f878a 10024 tmp = load_reg(s, rd);
86831435 10025 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10026 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 10027 }
5e3f878a 10028 store_reg(s, rd, tmp);
9ee6e8bb 10029 } else {
10030 /* MSR (immediate) and hints */
10031 /* All done in decodetree. Illegal ops already signalled. */
10032 g_assert_not_reached();
10033 }
10034 } else if ((insn & 0x0f900000) == 0x01000000
10035 && (insn & 0x00000090) != 0x00000090) {
10036 /* miscellaneous instructions */
10037 /* All done in decodetree. Illegal ops reach here. */
10038 goto illegal_op;
10039 } else if (((insn & 0x0e000000) == 0 &&
10040 (insn & 0x00000090) != 0x90) ||
10041 ((insn & 0x0e000000) == (1 << 25))) {
10042 /* Data-processing (reg, reg-shift-reg, imm). */
10043 /* All done in decodetree. Reach here for illegal ops. */
10044 goto illegal_op;
10045 } else {
10046 /* other instructions */
10047 op1 = (insn >> 24) & 0xf;
10048 switch(op1) {
10049 case 0x0:
10050 case 0x1:
10051 case 0x4:
10052 case 0x5:
10053 case 0x6:
10054 case 0x7:
10055 /* All done in decodetree. Reach here for illegal ops. */
10056 goto illegal_op;
10057 case 0x08:
10058 case 0x09:
10059 {
10060 int j, n, loaded_base;
10061 bool exc_return = false;
10062 bool is_load = extract32(insn, 20, 1);
10063 bool user = false;
39d5492a 10064 TCGv_i32 loaded_var;
10065 /* load/store multiple words */
10066 /* XXX: store correct base if write back */
9ee6e8bb 10067 if (insn & (1 << 22)) {
da3e53dd 10068 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
10069 if (IS_USER(s))
10070 goto illegal_op; /* only usable in supervisor mode */
10071
da3e53dd
PM
10072 if (is_load && extract32(insn, 15, 1)) {
10073 exc_return = true;
10074 } else {
10075 user = true;
10076 }
10077 }
10078 rn = (insn >> 16) & 0xf;
b0109805 10079 addr = load_reg(s, rn);
10080
10081 /* compute total size */
10082 loaded_base = 0;
f764718d 10083 loaded_var = NULL;
9ee6e8bb 10084 n = 0;
9798ac71 10085 for (i = 0; i < 16; i++) {
10086 if (insn & (1 << i))
10087 n++;
10088 }
10089 /* XXX: test invalid n == 0 case ? */
10090 if (insn & (1 << 23)) {
10091 if (insn & (1 << 24)) {
10092 /* pre increment */
b0109805 10093 tcg_gen_addi_i32(addr, addr, 4);
10094 } else {
10095 /* post increment */
10096 }
10097 } else {
10098 if (insn & (1 << 24)) {
10099 /* pre decrement */
b0109805 10100 tcg_gen_addi_i32(addr, addr, -(n * 4));
10101 } else {
10102 /* post decrement */
10103 if (n != 1)
b0109805 10104 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
10105 }
10106 }
10107 j = 0;
9798ac71 10108 for (i = 0; i < 16; i++) {
9ee6e8bb 10109 if (insn & (1 << i)) {
da3e53dd 10110 if (is_load) {
9ee6e8bb 10111 /* load */
5a839c0d 10112 tmp = tcg_temp_new_i32();
12dcc321 10113 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 10114 if (user) {
b75263d6 10115 tmp2 = tcg_const_i32(i);
1ce94f81 10116 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 10117 tcg_temp_free_i32(tmp2);
7d1b0095 10118 tcg_temp_free_i32(tmp);
9ee6e8bb 10119 } else if (i == rn) {
b0109805 10120 loaded_var = tmp;
9ee6e8bb 10121 loaded_base = 1;
9d090d17 10122 } else if (i == 15 && exc_return) {
fb0e8e79 10123 store_pc_exc_ret(s, tmp);
9ee6e8bb 10124 } else {
7dcc1f89 10125 store_reg_from_load(s, i, tmp);
10126 }
10127 } else {
10128 /* store */
10129 if (i == 15) {
7d1b0095 10130 tmp = tcg_temp_new_i32();
fdbcf632 10131 tcg_gen_movi_i32(tmp, read_pc(s));
9ee6e8bb 10132 } else if (user) {
7d1b0095 10133 tmp = tcg_temp_new_i32();
b75263d6 10134 tmp2 = tcg_const_i32(i);
9ef39277 10135 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 10136 tcg_temp_free_i32(tmp2);
9ee6e8bb 10137 } else {
b0109805 10138 tmp = load_reg(s, i);
9ee6e8bb 10139 }
12dcc321 10140 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10141 tcg_temp_free_i32(tmp);
10142 }
10143 j++;
10144 /* no need to add after the last transfer */
10145 if (j != n)
b0109805 10146 tcg_gen_addi_i32(addr, addr, 4);
10147 }
10148 }
10149 if (insn & (1 << 21)) {
10150 /* write back */
10151 if (insn & (1 << 23)) {
10152 if (insn & (1 << 24)) {
10153 /* pre increment */
10154 } else {
10155 /* post increment */
b0109805 10156 tcg_gen_addi_i32(addr, addr, 4);
10157 }
10158 } else {
10159 if (insn & (1 << 24)) {
10160 /* pre decrement */
10161 if (n != 1)
b0109805 10162 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
10163 } else {
10164 /* post decrement */
b0109805 10165 tcg_gen_addi_i32(addr, addr, -(n * 4));
10166 }
10167 }
10168 store_reg(s, rn, addr);
10169 } else {
7d1b0095 10170 tcg_temp_free_i32(addr);
10171 }
10172 if (loaded_base) {
b0109805 10173 store_reg(s, rn, loaded_var);
9ee6e8bb 10174 }
da3e53dd 10175 if (exc_return) {
9ee6e8bb 10176 /* Restore CPSR from SPSR. */
d9ba4830 10177 tmp = load_cpu_field(spsr);
10178 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10179 gen_io_start();
10180 }
235ea1f5 10181 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 10182 tcg_temp_free_i32(tmp);
b29fd33d 10183 /* Must exit loop to check un-masked IRQs */
dcba3a8d 10184 s->base.is_jmp = DISAS_EXIT;
10185 }
10186 }
10187 break;
10188 case 0xa:
10189 case 0xb:
10190 {
10191 int32_t offset;
10192
10193 /* branch (and link) */
9ee6e8bb 10194 if (insn & (1 << 24)) {
7d1b0095 10195 tmp = tcg_temp_new_i32();
a0415916 10196 tcg_gen_movi_i32(tmp, s->base.pc_next);
5e3f878a 10197 store_reg(s, 14, tmp);
9ee6e8bb 10198 }
534df156 10199 offset = sextract32(insn << 2, 0, 26);
fdbcf632 10200 gen_jmp(s, read_pc(s) + offset);
10201 }
10202 break;
10203 case 0xc:
10204 case 0xd:
10205 case 0xe:
10206 if (((insn >> 8) & 0xe) == 10) {
10207 /* VFP. */
7dcc1f89 10208 if (disas_vfp_insn(s, insn)) {
10209 goto illegal_op;
10210 }
7dcc1f89 10211 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 10212 /* Coprocessor. */
9ee6e8bb 10213 goto illegal_op;
6a57f3eb 10214 }
10215 break;
10216 case 0xf:
10217 /* swi */
a0415916 10218 gen_set_pc_im(s, s->base.pc_next);
d4a2dc67 10219 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 10220 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
10221 break;
10222 default:
10223 illegal_op:
1ce21ba1 10224 unallocated_encoding(s);
10225 break;
10226 }
10227 }
10228}
10229
331b1ca6 10230static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
296e5a0a 10231{
10232 /*
10233 * Return true if this is a 16 bit instruction. We must be precise
10234 * about this (matching the decode).
10235 */
10236 if ((insn >> 11) < 0x1d) {
10237 /* Definitely a 16-bit instruction */
10238 return true;
10239 }
10240
10241 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10242 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10243 * end up actually treating this as two 16-bit insns, though,
10244 * if it's half of a bl/blx pair that might span a page boundary.
10245 */
10246 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10247 arm_dc_feature(s, ARM_FEATURE_M)) {
10248 /* Thumb2 cores (including all M profile ones) always treat
10249 * 32-bit insns as 32-bit.
10250 */
10251 return false;
10252 }
10253
331b1ca6 10254 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
10255 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10256 * is not on the next page; we merge this into a 32-bit
10257 * insn.
10258 */
10259 return false;
10260 }
10261 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10262 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10263 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10264 * -- handle as single 16 bit insn
10265 */
10266 return true;
10267}
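/*
 * Editorial example: (insn >> 11) < 0x1d keys off the top five bits
 * of the first halfword.  0x4770 (BX lr) >> 11 == 0x08, so it is a
 * 16-bit insn; 0xe800, 0xf000 and 0xf800 (>> 11 == 0x1d, 0x1e, 0x1f)
 * begin 32-bit encodings on Thumb-2 cores, subject to the BL/BLX
 * page-split special case handled above.
 */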
10268
10269/* Translate a 32-bit thumb instruction. */
10270static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 10271{
46497f6a 10272 uint32_t imm, offset;
9ee6e8bb 10273 uint32_t rd, rn, rm, rs;
10274 TCGv_i32 tmp;
10275 TCGv_i32 tmp2;
39d5492a 10276 TCGv_i32 addr;
9ee6e8bb 10277 int op;
9ee6e8bb 10278
10279 /*
10280 * ARMv6-M supports a limited subset of Thumb2 instructions.
10281 * Other Thumb1 architectures allow only 32-bit
10282 * combined BL/BLX prefix and suffix.
296e5a0a 10283 */
10284 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10285 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10286 int i;
10287 bool found = false;
10288 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10289 0xf3b08040 /* dsb */,
10290 0xf3b08050 /* dmb */,
10291 0xf3b08060 /* isb */,
10292 0xf3e08000 /* mrs */,
10293 0xf000d000 /* bl */};
10294 static const uint32_t armv6m_mask[] = {0xffe0d000,
10295 0xfff0d0f0,
10296 0xfff0d0f0,
10297 0xfff0d0f0,
10298 0xffe0d000,
10299 0xf800d000};
10300
10301 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10302 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10303 found = true;
10304 break;
10305 }
10306 }
10307 if (!found) {
10308 goto illegal_op;
10309 }
10310 } else if ((insn & 0xf800e800) != 0xf000e800) {
10311 ARCH(6T2);
10312 }
10313
10314 if (disas_t32(s, insn)) {
10315 return;
10316 }
10317 /* fall back to legacy decoder */
10318
10319 rn = (insn >> 16) & 0xf;
10320 rs = (insn >> 12) & 0xf;
10321 rd = (insn >> 8) & 0xf;
10322 rm = insn & 0xf;
10323 switch ((insn >> 25) & 0xf) {
10324 case 0: case 1: case 2: case 3:
10325 /* 16-bit instructions. Should never happen. */
10326 abort();
10327 case 4:
10328 if (insn & (1 << 22)) {
10329 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10330 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 10331 * table branch, TT.
ebfe27c5 10332 */
10333 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10334 arm_dc_feature(s, ARM_FEATURE_V8)) {
10335 /* 0b1110_1001_0111_1111_1110_1001_0111_1111
10336 * - SG (v8M only)
10337 * The bulk of the behaviour for this instruction is implemented
10338 * in v7m_handle_execute_nsc(), which deals with the insn when
10339 * it is executed by a CPU in non-secure state from memory
10340 * which is Secure & NonSecure-Callable.
10341 * Here we only need to handle the remaining cases:
10342 * * in NS memory (including the "security extension not
10343 * implemented" case) : NOP
10344 * * in S memory but CPU already secure (clear IT bits)
10345 * We know that the attribute for the memory this insn is
10346 * in must match the current CPU state, because otherwise
10347 * get_phys_addr_pmsav8 would have generated an exception.
10348 */
10349 if (s->v8m_secure) {
10350 /* Like the IT insn, we don't need to generate any code */
10351 s->condexec_cond = 0;
10352 s->condexec_mask = 0;
10353 }
10354 } else if (insn & 0x01200000) {
10355 /* load/store dual, in decodetree */
10356 goto illegal_op;
9ee6e8bb 10357 } else if ((insn & (1 << 23)) == 0) {
10358 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
10359 * - load/store exclusive word
5158de24 10360 * - TT (v8M only)
10361 */
10362 if (rs == 15) {
10363 if (!(insn & (1 << 20)) &&
10364 arm_dc_feature(s, ARM_FEATURE_M) &&
10365 arm_dc_feature(s, ARM_FEATURE_V8)) {
10366 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
10367 * - TT (v8M only)
10368 */
10369 bool alt = insn & (1 << 7);
10370 TCGv_i32 addr, op, ttresp;
10371
10372 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
10373 /* we UNDEF for these UNPREDICTABLE cases */
10374 goto illegal_op;
10375 }
10376
10377 if (alt && !s->v8m_secure) {
10378 goto illegal_op;
10379 }
10380
10381 addr = load_reg(s, rn);
10382 op = tcg_const_i32(extract32(insn, 6, 2));
10383 ttresp = tcg_temp_new_i32();
10384 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
10385 tcg_temp_free_i32(addr);
10386 tcg_temp_free_i32(op);
10387 store_reg(s, rd, ttresp);
384c6c03 10388 break;
5158de24 10389 }
10390 goto illegal_op;
10391 }
10392 /* Load/store exclusive, in decodetree */
10393 goto illegal_op;
2359bf80 10394 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb 10395 /* Table Branch. */
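/* Editorial note: TBB/TBH branch to PC + 2 * entry, where the entry
   is the byte at Rn + Rm (TBB) or the halfword at Rn + 2 * Rm (TBH);
   the shift-left-by-one below doubles the loaded entry before adding
   the Thumb PC (this insn's address + 4). */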
fdbcf632 10396 addr = load_reg(s, rn);
b26eefb6 10397 tmp = load_reg(s, rm);
b0109805 10398 tcg_gen_add_i32(addr, addr, tmp);
10399 if (insn & (1 << 4)) {
10400 /* tbh */
b0109805 10401 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10402 tcg_temp_free_i32(tmp);
e2592fad 10403 tmp = tcg_temp_new_i32();
12dcc321 10404 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10405 } else { /* tbb */
7d1b0095 10406 tcg_temp_free_i32(tmp);
e2592fad 10407 tmp = tcg_temp_new_i32();
12dcc321 10408 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10409 }
7d1b0095 10410 tcg_temp_free_i32(addr);
b0109805 10411 tcg_gen_shli_i32(tmp, tmp, 1);
fdbcf632 10412 tcg_gen_addi_i32(tmp, tmp, read_pc(s));
b0109805 10413 store_reg(s, 15, tmp);
9ee6e8bb 10414 } else {
10415 /* Load/store exclusive, load-acq/store-rel, in decodetree */
10416 goto illegal_op;
10417 }
10418 } else {
10419 /* Load/store multiple, RFE, SRS. */
10420 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 10421 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 10422 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10423 goto illegal_op;
00115976 10424 }
10425 if (insn & (1 << 20)) {
10426 /* rfe */
10427 addr = load_reg(s, rn);
10428 if ((insn & (1 << 24)) == 0)
10429 tcg_gen_addi_i32(addr, addr, -8);
10430 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 10431 tmp = tcg_temp_new_i32();
12dcc321 10432 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10433 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10434 tmp2 = tcg_temp_new_i32();
12dcc321 10435 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
10436 if (insn & (1 << 21)) {
10437 /* Base writeback. */
10438 if (insn & (1 << 24)) {
10439 tcg_gen_addi_i32(addr, addr, 4);
10440 } else {
10441 tcg_gen_addi_i32(addr, addr, -4);
10442 }
10443 store_reg(s, rn, addr);
10444 } else {
7d1b0095 10445 tcg_temp_free_i32(addr);
9ee6e8bb 10446 }
b0109805 10447 gen_rfe(s, tmp, tmp2);
10448 } else {
10449 /* srs */
10450 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10451 insn & (1 << 21));
10452 }
10453 } else {
5856d44e 10454 int i, loaded_base = 0;
39d5492a 10455 TCGv_i32 loaded_var;
7c0ed88e 10456 bool wback = extract32(insn, 21, 1);
9ee6e8bb 10457 /* Load/store multiple. */
b0109805 10458 addr = load_reg(s, rn);
10459 offset = 0;
10460 for (i = 0; i < 16; i++) {
10461 if (insn & (1 << i))
10462 offset += 4;
10463 }
7c0ed88e 10464
9ee6e8bb 10465 if (insn & (1 << 24)) {
b0109805 10466 tcg_gen_addi_i32(addr, addr, -offset);
10467 }
10468
10469 if (s->v8m_stackcheck && rn == 13 && wback) {
10470 /*
10471 * If the writeback is incrementing SP rather than
10472 * decrementing it, and the initial SP is below the
10473 * stack limit but the final written-back SP would
10474 * be above, then we must not perform any memory
10475 * accesses, but it is IMPDEF whether we generate
10476 * an exception. We choose to do so in this case.
10477 * At this point 'addr' is the lowest address, so
10478 * either the original SP (if incrementing) or our
10479 * final SP (if decrementing), so that's what we check.
10480 */
10481 gen_helper_v8m_stackcheck(cpu_env, addr);
10482 }
10483
f764718d 10484 loaded_var = NULL;
10485 for (i = 0; i < 16; i++) {
10486 if ((insn & (1 << i)) == 0)
10487 continue;
10488 if (insn & (1 << 20)) {
10489 /* Load. */
e2592fad 10490 tmp = tcg_temp_new_i32();
12dcc321 10491 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
69be3e13 10492 if (i == rn) {
10493 loaded_var = tmp;
10494 loaded_base = 1;
9ee6e8bb 10495 } else {
69be3e13 10496 store_reg_from_load(s, i, tmp);
10497 }
10498 } else {
10499 /* Store. */
b0109805 10500 tmp = load_reg(s, i);
12dcc321 10501 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10502 tcg_temp_free_i32(tmp);
9ee6e8bb 10503 }
b0109805 10504 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 10505 }
10506 if (loaded_base) {
10507 store_reg(s, rn, loaded_var);
10508 }
7c0ed88e 10509 if (wback) {
10510 /* Base register writeback. */
10511 if (insn & (1 << 24)) {
b0109805 10512 tcg_gen_addi_i32(addr, addr, -offset);
10513 }
10514 /* Fault if writeback register is in register list. */
10515 if (insn & (1 << rn))
10516 goto illegal_op;
10517 store_reg(s, rn, addr);
10518 } else {
7d1b0095 10519 tcg_temp_free_i32(addr);
10520 }
10521 }
10522 }
10523 break;
2af9ab77 10524 case 5:
10525 /* All in decodetree */
10526 goto illegal_op;
10527 case 13: /* Misc data processing. */
10528 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10529 if (op < 4 && (insn & 0xf000) != 0xf000)
10530 goto illegal_op;
10531 switch (op) {
5be2c123 10532 case 0: /* Register controlled shift, in decodetree */
46497f6a 10533 case 1: /* Sign/zero extend, in decodetree */
adf1a566 10534 case 2: /* SIMD add/subtract, in decodetree */
46497f6a 10535 case 3: /* Other data processing, in decodetree */
adf1a566 10536 goto illegal_op;
10537 case 4: case 5:
10538 /* 32-bit multiply. Sum of absolute differences, in decodetree */
10539 goto illegal_op;
10540 case 6: case 7: /* 64-bit multiply, Divide, in decodetree */
10541 goto illegal_op;
10542 }
10543 break;
10544 case 6: case 7: case 14: case 15:
10545 /* Coprocessor. */
7517748e 10546 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10547 /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
10548 if (extract32(insn, 24, 2) == 3) {
10549 goto illegal_op; /* op0 = 0b11 : unallocated */
10550 }
10551
10552 /*
10553 * Decode VLLDM and VLSTM first: these are nonstandard because:
10554 * * if there is no FPU then these insns must NOP in
10555 * Secure state and UNDEF in Nonsecure state
10556 * * if there is an FPU then these insns do not have
10557 * the usual behaviour that disas_vfp_insn() provides of
10558 * being controlled by CPACR/NSACR enable bits or the
10559 * lazy-stacking logic.
7517748e 10560 */
10561 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10562 (insn & 0xffa00f00) == 0xec200a00) {
10563 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
10564 * - VLLDM, VLSTM
10565 * We choose to UNDEF if the RAZ bits are non-zero.
10566 */
10567 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
10568 goto illegal_op;
10569 }
10570
10571 if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
10572 TCGv_i32 fptr = load_reg(s, rn);
10573
10574 if (extract32(insn, 20, 1)) {
956fe143 10575 gen_helper_v7m_vlldm(cpu_env, fptr);
10576 } else {
10577 gen_helper_v7m_vlstm(cpu_env, fptr);
10578 }
10579 tcg_temp_free_i32(fptr);
10580
10581 /* End the TB, because we have updated FP control bits */
10582 s->base.is_jmp = DISAS_UPDATE;
10583 }
10584 break;
10585 }
10586 if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
10587 ((insn >> 8) & 0xe) == 10) {
10588 /* FP, and the CPU supports it */
10589 if (disas_vfp_insn(s, insn)) {
10590 goto illegal_op;
10591 }
10592 break;
10593 }
10594
b1e5336a 10595 /* All other insns: NOCP */
a767fac8 10596 gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
10597 default_exception_el(s));
10598 break;
10599 }
10600 if ((insn & 0xfe000a00) == 0xfc000800
10601 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10602 /* The Thumb2 and ARM encodings are identical. */
10603 if (disas_neon_insn_3same_ext(s, insn)) {
10604 goto illegal_op;
10605 }
10606 } else if ((insn & 0xff000a00) == 0xfe000800
10607 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10608 /* The Thumb2 and ARM encodings are identical. */
10609 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
10610 goto illegal_op;
10611 }
10612 } else if (((insn >> 24) & 3) == 3) {
9ee6e8bb 10613 /* Translate into the equivalent ARM encoding. */
f06053e3 10614 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10615 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10616 goto illegal_op;
7dcc1f89 10617 }
6a57f3eb 10618 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10619 if (disas_vfp_insn(s, insn)) {
10620 goto illegal_op;
10621 }
10622 } else {
10623 if (insn & (1 << 28))
10624 goto illegal_op;
7dcc1f89 10625 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10626 goto illegal_op;
7dcc1f89 10627 }
10628 }
10629 break;
10630 case 8: case 9: case 10: case 11:
10631 if (insn & (1 << 15)) {
10632 /* Branches, misc control. */
10633 if (insn & 0x5000) {
10634 /* Unconditional branch. */
10635 /* signextend(hw1[10:0]) -> offset[:12]. */
10636 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10637 /* hw1[10:0] -> offset[11:1]. */
10638 offset |= (insn & 0x7ff) << 1;
10639 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10640 offset[24:22] already have the same value because of the
10641 sign extension above. */
10642 offset ^= ((~insn) & (1 << 13)) << 10;
10643 offset ^= ((~insn) & (1 << 11)) << 11;
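/* Editorial note: this reassembles the T4-encoding immediate, where
   I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S).  The arithmetic shift
   above already copied the sign bit S into offset[23:22], so XOR-ing
   with ~insn[13] and ~insn[11] produces I1 and I2 in place. */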
10644
10645 if (insn & (1 << 14)) {
10646 /* Branch and link. */
a0415916 10647 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
b5ff1b31 10648 }
3b46e624 10649
fdbcf632 10650 offset += read_pc(s);
10651 if (insn & (1 << 12)) {
10652 /* b/bl */
b0109805 10653 gen_jmp(s, offset);
10654 } else {
10655 /* blx */
b0109805 10656 offset &= ~(uint32_t)2;
be5e7a76 10657 /* thumb2 bx, no need to check */
b0109805 10658 gen_bx_im(s, offset);
2c0262af 10659 }
10660 } else if (((insn >> 23) & 7) == 7) {
10661 /* Misc control */
10662 if (insn & (1 << 13))
10663 goto illegal_op;
10664
10665 if (insn & (1 << 26)) {
10666 /* hvc, smc, in decodetree */
10667 goto illegal_op;
2c0262af 10668 } else {
10669 op = (insn >> 20) & 7;
10670 switch (op) {
10671 case 0: /* msr cpsr, in decodetree */
10672 case 1: /* msr spsr, in decodetree */
10673 goto illegal_op;
9ee6e8bb 10674 case 2: /* cps, nop-hint. */
63130596 10675 /* nop hints in decodetree */
10676 /* Implemented as NOP in user mode. */
10677 if (IS_USER(s))
10678 break;
10679 offset = 0;
10680 imm = 0;
10681 if (insn & (1 << 10)) {
10682 if (insn & (1 << 7))
10683 offset |= CPSR_A;
10684 if (insn & (1 << 6))
10685 offset |= CPSR_I;
10686 if (insn & (1 << 5))
10687 offset |= CPSR_F;
10688 if (insn & (1 << 9))
10689 imm = CPSR_A | CPSR_I | CPSR_F;
10690 }
10691 if (insn & (1 << 8)) {
10692 offset |= 0x1f;
10693 imm |= (insn & 0x1f);
10694 }
10695 if (offset) {
2fbac54b 10696 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10697 }
10698 break;
10699 case 3: /* Special control operations. */
14120108 10700 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
8297cb13 10701 !arm_dc_feature(s, ARM_FEATURE_M)) {
10702 goto illegal_op;
10703 }
10704 op = (insn >> 4) & 0xf;
10705 switch (op) {
10706 case 2: /* clrex */
426f5abc 10707 gen_clrex(s);
9ee6e8bb
PB
10708 break;
10709 case 4: /* dsb */
10710 case 5: /* dmb */
61e4c432 10711 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10712 break;
10713 case 6: /* isb */
10714 /* We need to break the TB after this insn
10715 * to execute self-modifying code correctly
10716 * and also to take any pending interrupts
10717 * immediately.
10718 */
a0415916 10719 gen_goto_tb(s, 0, s->base.pc_next);
6df99dec 10720 break;
10721 case 7: /* sb */
10722 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
10723 goto illegal_op;
10724 }
10725 /*
10726 * TODO: There is no speculation barrier opcode
10727 * for TCG; MB and end the TB instead.
10728 */
10729 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
a0415916 10730 gen_goto_tb(s, 0, s->base.pc_next);
9888bd1e 10731 break;
10732 default:
10733 goto illegal_op;
10734 }
10735 break;
10736 case 4: /* bxj, in decodetree */
10737 goto illegal_op;
9ee6e8bb 10738 case 5: /* Exception return. */
10739 case 6: /* MRS, in decodetree */
10740 case 7: /* MSR, in decodetree */
10741 goto illegal_op;
10742 }
10743 }
10744 } else {
10745 /* Conditional branch. */
10746 op = (insn >> 22) & 0xf;
10747 /* Generate a conditional jump to next instruction. */
c2d9644e 10748 arm_skip_unless(s, op);
10749
10750 /* offset[11:1] = insn[10:0] */
10751 offset = (insn & 0x7ff) << 1;
10752 /* offset[17:12] = insn[21:16]. */
10753 offset |= (insn & 0x003f0000) >> 4;
10754 /* offset[31:20] = insn[26]. */
10755 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10756 /* offset[18] = insn[13]. */
10757 offset |= (insn & (1 << 13)) << 5;
10758 /* offset[19] = insn[11]. */
10759 offset |= (insn & (1 << 11)) << 8;
10760
10761 /* jump to the offset */
fdbcf632 10762 gen_jmp(s, read_pc(s) + offset);
10763 }
10764 } else {
10765 /*
10766 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
10767 * - Data-processing (modified immediate, plain binary immediate)
10768 */
9ee6e8bb 10769 if (insn & (1 << 25)) {
10770 /*
10771 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
10772 * - Data-processing (plain binary immediate)
10773 */
9ee6e8bb 10774 if (insn & (1 << 24)) {
10775 /* Bitfield/Saturate, in decodetree */
10776 goto illegal_op;
10777 } else {
10778 imm = ((insn & 0x04000000) >> 15)
10779 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10780 if (insn & (1 << 22)) {
10781 /* 16-bit immediate. */
10782 imm |= (insn >> 4) & 0xf000;
10783 if (insn & (1 << 23)) {
10784 /* movt */
5e3f878a 10785 tmp = load_reg(s, rd);
86831435 10786 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10787 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10788 } else {
9ee6e8bb 10789 /* movw */
7d1b0095 10790 tmp = tcg_temp_new_i32();
5e3f878a 10791 tcg_gen_movi_i32(tmp, imm);
2c0262af 10792 }
55203189 10793 store_reg(s, rd, tmp);
2c0262af 10794 } else {
10795 /* Add/sub 12-bit immediate, in decodetree */
10796 goto illegal_op;
9ee6e8bb 10797 }
191abaa2 10798 }
9ee6e8bb 10799 } else {
10800 /* Data-processing (modified immediate) */
10801 /* All done in decodetree. Reach here for illegal ops. */
10802 goto illegal_op;
2c0262af 10803 }
10804 }
10805 break;
5e291fe1 10806 case 12:
9ee6e8bb 10807 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10808 if (disas_neon_ls_insn(s, insn)) {
c1713132 10809 goto illegal_op;
7dcc1f89 10810 }
10811 break;
10812 }
10813 /* Load/store single data item, in decodetree */
10814 goto illegal_op;
10815 default:
10816 goto illegal_op;
2c0262af 10817 }
2eea841c 10818 return;
9ee6e8bb 10819illegal_op:
1ce21ba1 10820 unallocated_encoding(s);
10821}
10822
296e5a0a 10823static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 10824{
296e5a0a 10825 uint32_t val, op, rm, rn, rd, shift, cond;
10826 int32_t offset;
10827 int i;
10828 TCGv_i32 tmp;
10829 TCGv_i32 tmp2;
10830 TCGv_i32 addr;
99c475ab 10831
10832 switch (insn >> 12) {
10833 case 0: case 1:
396e467c 10834
10835 rd = insn & 7;
10836 op = (insn >> 11) & 3;
10837 if (op == 3) {
10838 /*
10839 * 0b0001_1xxx_xxxx_xxxx
10840 * - Add, subtract (three low registers)
10841 * - Add, subtract (two low registers and immediate)
10842 */
99c475ab 10843 rn = (insn >> 3) & 7;
396e467c 10844 tmp = load_reg(s, rn);
10845 if (insn & (1 << 10)) {
10846 /* immediate */
7d1b0095 10847 tmp2 = tcg_temp_new_i32();
396e467c 10848 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
10849 } else {
10850 /* reg */
10851 rm = (insn >> 6) & 7;
396e467c 10852 tmp2 = load_reg(s, rm);
99c475ab 10853 }
10854 if (insn & (1 << 9)) {
10855 if (s->condexec_mask)
396e467c 10856 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10857 else
72485ec4 10858 gen_sub_CC(tmp, tmp, tmp2);
10859 } else {
10860 if (s->condexec_mask)
396e467c 10861 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10862 else
72485ec4 10863 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10864 }
7d1b0095 10865 tcg_temp_free_i32(tmp2);
396e467c 10866 store_reg(s, rd, tmp);
10867 } else {
10868 /* shift immediate */
10869 rm = (insn >> 3) & 7;
10870 shift = (insn >> 6) & 0x1f;
10871 tmp = load_reg(s, rm);
10872 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10873 if (!s->condexec_mask)
10874 gen_logic_CC(tmp);
10875 store_reg(s, rd, tmp);
10876 }
10877 break;
10878 case 2: case 3:
10879 /*
10880 * 0b001x_xxxx_xxxx_xxxx
10881 * - Add, subtract, compare, move (one low register and immediate)
10882 */
10883 op = (insn >> 11) & 3;
10884 rd = (insn >> 8) & 0x7;
396e467c 10885 if (op == 0) { /* mov */
7d1b0095 10886 tmp = tcg_temp_new_i32();
396e467c 10887 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10888 if (!s->condexec_mask)
10889 gen_logic_CC(tmp);
10890 store_reg(s, rd, tmp);
10891 } else {
10892 tmp = load_reg(s, rd);
7d1b0095 10893 tmp2 = tcg_temp_new_i32();
10894 tcg_gen_movi_i32(tmp2, insn & 0xff);
10895 switch (op) {
10896 case 1: /* cmp */
72485ec4 10897 gen_sub_CC(tmp, tmp, tmp2);
10898 tcg_temp_free_i32(tmp);
10899 tcg_temp_free_i32(tmp2);
10900 break;
10901 case 2: /* add */
10902 if (s->condexec_mask)
10903 tcg_gen_add_i32(tmp, tmp, tmp2);
10904 else
72485ec4 10905 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10906 tcg_temp_free_i32(tmp2);
10907 store_reg(s, rd, tmp);
10908 break;
10909 case 3: /* sub */
10910 if (s->condexec_mask)
10911 tcg_gen_sub_i32(tmp, tmp, tmp2);
10912 else
72485ec4 10913 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10914 tcg_temp_free_i32(tmp2);
10915 store_reg(s, rd, tmp);
10916 break;
10917 }
99c475ab 10918 }
10919 break;
10920 case 4:
10921 if (insn & (1 << 11)) {
10922 rd = (insn >> 8) & 7;
5899f386 10923 /* load pc-relative. Bit 1 of PC is ignored. */
16e0d823 10924 addr = add_reg_for_lit(s, 15, (insn & 0xff) * 4);
c40c8556 10925 tmp = tcg_temp_new_i32();
10926 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
10927 rd | ISSIs16Bit);
7d1b0095 10928 tcg_temp_free_i32(addr);
b0109805 10929 store_reg(s, rd, tmp);
10930 break;
10931 }
10932 if (insn & (1 << 10)) {
10933 /* 0b0100_01xx_xxxx_xxxx
10934 * - data processing extended, branch and exchange
10935 */
10936 rd = (insn & 7) | ((insn >> 4) & 8);
10937 rm = (insn >> 3) & 0xf;
10938 op = (insn >> 8) & 3;
10939 switch (op) {
10940 case 0: /* add */
10941 tmp = load_reg(s, rd);
10942 tmp2 = load_reg(s, rm);
10943 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10944 tcg_temp_free_i32(tmp2);
10945 if (rd == 13) {
10946 /* ADD SP, SP, reg */
10947 store_sp_checked(s, tmp);
10948 } else {
10949 store_reg(s, rd, tmp);
10950 }
10951 break;
10952 case 1: /* cmp */
10953 tmp = load_reg(s, rd);
10954 tmp2 = load_reg(s, rm);
72485ec4 10955 gen_sub_CC(tmp, tmp, tmp2);
10956 tcg_temp_free_i32(tmp2);
10957 tcg_temp_free_i32(tmp);
10958 break;
10959 case 2: /* mov/cpy */
396e467c 10960 tmp = load_reg(s, rm);
10961 if (rd == 13) {
10962 /* MOV SP, reg */
10963 store_sp_checked(s, tmp);
10964 } else {
10965 store_reg(s, rd, tmp);
10966 }
99c475ab 10967 break;
10968 case 3:
10969 {
10970 /* 0b0100_0111_xxxx_xxxx
10971 * - branch [and link] exchange thumb register
10972 */
10973 bool link = insn & (1 << 7);
10974
fb602cb7 10975 if (insn & 3) {
10976 goto undef;
10977 }
10978 if (link) {
be5e7a76 10979 ARCH(5);
ebfe27c5 10980 }
10981 if ((insn & 4)) {
10982 /* BXNS/BLXNS: only exists for v8M with the
10983 * security extensions, and always UNDEF if NonSecure.
10984 * We don't implement these in the user-only mode
10985 * either (in theory you can use them from Secure User
10986 * mode but they are too tied in to system emulation.)
10987 */
10988 if (!s->v8m_secure || IS_USER_ONLY) {
10989 goto undef;
10990 }
10991 if (link) {
3e3fa230 10992 gen_blxns(s, rm);
10993 } else {
10994 gen_bxns(s, rm);
10995 }
10996 break;
10997 }
10998 /* BLX/BX */
10999 tmp = load_reg(s, rm);
11000 if (link) {
a0415916 11001 val = (uint32_t)s->base.pc_next | 1;
7d1b0095 11002 tmp2 = tcg_temp_new_i32();
11003 tcg_gen_movi_i32(tmp2, val);
11004 store_reg(s, 14, tmp2);
11005 gen_bx(s, tmp);
11006 } else {
11007 /* Only BX works as exception-return, not BLX */
11008 gen_bx_excret(s, tmp);
99c475ab 11009 }
11010 break;
11011 }
ebfe27c5 11012 }
11013 break;
11014 }
11015
11016 /*
11017 * 0b0100_00xx_xxxx_xxxx
11018 * - Data-processing (two low registers)
11019 */
11020 rd = insn & 7;
11021 rm = (insn >> 3) & 7;
11022 op = (insn >> 6) & 0xf;
11023 if (op == 2 || op == 3 || op == 4 || op == 7) {
11024 /* the shift/rotate ops want the operands backwards */
11025 val = rm;
11026 rm = rd;
11027 rd = val;
11028 val = 1;
11029 } else {
11030 val = 0;
11031 }
11032
        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            tmp = NULL;
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;
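
        /*
         * Illustrative note: the compare ops set rd = 16 above so that
         * neither temporary is written back; e.g. 0x4288 ("cmp r0, r1")
         * only updates NZCV through gen_sub_CC().
         */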

    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
        } else {
            tmp = tcg_temp_new_i32();
        }

        switch (op) {
        case 0: /* str */
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 1: /* strh */
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 2: /* strb */
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 3: /* ldrsb */
            gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 4: /* ldr */
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 5: /* ldrh */
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 6: /* ldrb */
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 7: /* ldrsh */
            gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        }
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
        } else {
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;
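
        /*
         * Worked example (illustrative): 0x5cd1 is "ldrb r1, [r2, r3]":
         * op = 6 selects the zero-extending byte load, with the register
         * offset already folded into addr above.
         */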

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;
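
        /*
         * Note on the immediate above: bits [10:6] hold imm5 and the
         * byte offset is imm5 * 4, so extracting it as (insn >> 4) & 0x7c
         * performs the shift and the scaling in one step.  E.g. 0x6868
         * is "ldr r0, [r5, #4]".
         */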

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /*
         * 0b1010_xxxx_xxxx_xxxx
         * - Add PC/SP (immediate)
         */
        rd = (insn >> 8) & 7;
        val = (insn & 0xff) * 4;
        tmp = add_reg_for_lit(s, insn & (1 << 11) ? 13 : 15, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /*
             * 0b1011_0000_xxxx_xxxx
             * - ADD (SP plus immediate)
             * - SUB (SP minus immediate)
             */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_sp_checked(s, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
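
            /*
             * Example (illustrative): 0xb2c0 is "uxtb r0, r0", selected
             * by (insn >> 6) & 3 == 3 above.
             */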
        case 4: case 5: case 0xc: case 0xd:
            /*
             * 0b1011_x10x_xxxx_xxxx
             * - push/pop
             */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }

            if (s->v8m_stackcheck) {
                /*
                 * Here 'addr' is the lower of "old SP" and "new SP";
                 * if this is a pop that starts below the limit and ends
                 * above it, it is UNKNOWN whether the limit check triggers;
                 * we choose to trigger.
                 */
                gen_helper_v8m_stackcheck(cpu_env, addr);
            }

            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            tmp = NULL;
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
            }
            break;
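
            /*
             * Worked example (illustrative): 0xb510 is "push {r4, lr}".
             * Bit 8 adds LR, so offset = 8; addr is pre-decremented by 8
             * and the two words are then stored upwards, giving the usual
             * full-descending stack layout.
             */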

        case 1: case 3: case 9: case 11: /* cbz/cbnz */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            arm_gen_condlabel(s);
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            gen_jmp(s, read_pc(s) + offset);
            break;
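
            /*
             * Offset reassembly above: imm5 (bits [7:3]) scaled by 2,
             * plus the i bit (bit 9) as bit 6.  E.g. 0xb908 is a CBNZ
             * on r0 whose taken target is read_pc(s) + 2; since the
             * offset is zero-extended, cbz/cbnz can only branch forwards.
             */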

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /*
             * IT (If-Then)
             *
             * Combinations of firstcond and mask which set up an 0b1111
             * condition are UNPREDICTABLE; we take the CONSTRAINED
             * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
             * i.e. both meaning "execute always".
             */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;
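
            /*
             * Example (illustrative): "ite eq" assembles to 0xbf0c,
             * leaving condexec_cond = 0x0 and condexec_mask = 0b01100
             * here; thumb_tr_translate_insn() then shifts the mask left
             * once per insn, feeding the next condition LSB into
             * condexec_cond.
             */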

        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm8, true));
            break;
        }

        case 0xa: /* rev, and hlt */
        {
            int op1 = extract32(insn, 6, 2);

            if (op1 == 2) {
                /* HLT */
                int imm6 = extract32(insn, 0, 6);

                gen_hlt(s, imm6);
                break;
            }

            /* Otherwise this is rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch (op1) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp, tmp); break;
            case 3: gen_revsh(tmp, tmp); break;
            default:
                g_assert_not_reached();
            }
            store_reg(s, rd, tmp);
            break;
        }

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
                    gen_helper_setend(cpu_env);
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var = NULL;
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
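
    /*
     * Example (illustrative): 0xc803 is "ldmia r0, {r0, r1}".  The base
     * register is in the list, so there is no writeback; the value loaded
     * for r0 is parked in loaded_var above and only committed after the
     * other loads have completed.
     */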
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->base.pc_next);
            s->svc_imm = extract32(insn, 0, 8);
            s->base.is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        arm_skip_unless(s, cond);

        /* jump to the offset */
        val = read_pc(s);
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;
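
        /*
         * Worked example (illustrative): 0xd0fe is "beq .": cond = 0 and
         * imm8 = 0xfe sign-extends to -2; doubled it gives -4, which
         * exactly cancels the +4 in read_pc() and branches back to the
         * insn itself.
         */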

    case 14:
        if (insn & (1 << 11)) {
            /* thumb_insn_is_16bit() ensures we can't get here for
             * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
             * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
             */
            assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
            ARCH(5);
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        }
        /* unconditional branch */
        val = read_pc(s);
        offset = ((int32_t)insn << 21) >> 21;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 15:
        /* thumb_insn_is_16bit() ensures we can't get here for
         * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
         */
        assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));

        if (insn & (1 << 11)) {
            /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
        } else {
            /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
            uint32_t uoffset = ((int32_t)insn << 21) >> 9;

            tcg_gen_movi_i32(cpu_R[14], read_pc(s) + uoffset);
        }
        break;
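
        /*
         * Worked example (illustrative): the thumb1 pair 0xf000, 0xf880
         * is a split "bl" with a total forward offset of 0x104 from the
         * prefix insn: the prefix stores read_pc() plus imm11 << 12
         * (here 0) into LR, and the suffix adds imm11 = 0x80 scaled by 2
         * and leaves the return address (suffix pc_next | 1) in LR.
         */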
    }
    return;
illegal_op:
undef:
    unallocated_encoding(s);
}

static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->base.pc_next might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * We know this is a Thumb insn, and our caller ensures we are
     * only called if dc->base.pc_next is less than 4 bytes from the page
     * boundary, so we cross the page if the first 16 bits indicate
     * that this is a 32 bit insn.
     */
    uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);

    return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
}
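
/*
 * For reference (illustrative): thumb_insn_is_16bit() treats a first
 * halfword of 0xe800 or above as the start of a 32-bit insn on a
 * Thumb-2 capable core, so a false positive from this function only
 * costs an unnecessarily short TB, never a missed page crossing.
 */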

static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = env_archcpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    uint32_t condexec, core_mmu_idx;

    dc->isar = &cpu->isar;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
    dc->condexec_mask = (condexec & 0xf) << 1;
    dc->condexec_cond = condexec >> 4;
    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
    dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
    dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
        dc->vec_stride = 0;
    } else {
        dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
        dc->c15_cpar = 0;
    }
    dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
    dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
    dc->v7m_new_fp_ctxt_needed =
        FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
    dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;
    if (!arm_feature(env, ARM_FEATURE_M)) {
        dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
    }

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1.  */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }

    cpu_V0 = tcg_temp_new_i64();
    cpu_V1 = tcg_temp_new_i64();
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
}

static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}

static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->base.pc_next);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->base.pc_next += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}

static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->base.pc_next >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_swstep_exception(dc, 0, 0);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}

static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    translator_loop_temp_check(&dc->base);
}

static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    dc->pc_curr = dc->base.pc_next;
    insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
    dc->insn = insn;
    dc->base.pc_next += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA.  We performed the cross-page check
       in init_disas_context by adjusting max_insns.  */
}

static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /* Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /* HLT: v8A only. This is unconditional even when it is going to
         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
         * For v7 cores this was a plain old undefined encoding and so
         * honours its cc check. (We might be using the encoding as
         * a semihosting trap, but we don't change the cc check behaviour
         * on that account, because a debugger connected to a real v7A
         * core and emulating semihosting traps by catching the UNDEF
         * exception would also only see cases where the cc check passed.
         * No guest code should be trying to do a HLT semihosting trap
         * in an IT block anyway.
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}

static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    dc->pc_curr = dc->base.pc_next;
    insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
    dc->base.pc_next += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->base.pc_next += 2;
    }
    dc->insn = insn;

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        /*
         * Conditionally skip the insn. Note that both 0xe and 0xf mean
         * "always"; 0xf is not "never".
         */
        if (cond < 0x0e) {
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }
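
    /*
     * Worked example (illustrative): after "ite eq" (0xbf0c) the state
     * is condexec_cond = 0x0, condexec_mask = 0b01100.  The advance
     * above runs once for the IT insn itself (mask -> 0b11000), so the
     * first block insn is tested against EQ; the next advance pulls a 1
     * into the cond LSB (NE) leaving mask = 0b10000, and after the
     * second insn the mask empties and the state is cleared.
     */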

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->base.pc_next);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->base.pc_next);
        }
    }
}

static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    const TranslatorOps *ops = &arm_translator_ops;

    if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb, max_insns);
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}