]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
target/arm: Convert Data Processing (immediate)
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
36a71934 28#include "tcg-op-gvec.h"
1de7afc9 29#include "qemu/log.h"
534df156 30#include "qemu/bitops.h"
1d854765 31#include "arm_ldst.h"
f1672e6f 32#include "hw/semihosting/semihost.h"
1497c961 33
2ef6175a
RH
34#include "exec/helper-proto.h"
35#include "exec/helper-gen.h"
2c0262af 36
a7e30d84 37#include "trace-tcg.h"
508127e2 38#include "exec/log.h"
a7e30d84
LV
39
40
2b51668f
PM
41#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 43/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 44#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
09cbd501 45#define ENABLE_ARCH_5J dc_isar_feature(jazelle, s)
2b51668f
PM
46#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 51
86753403 52#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 53
f570c61e 54#include "translate.h"
e12ce78d 55
b5ff1b31
FB
56#if defined(CONFIG_USER_ONLY)
57#define IS_USER(s) 1
58#else
59#define IS_USER(s) (s->user)
60#endif
61
ad69471c 62/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 63static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 64static TCGv_i32 cpu_R[16];
78bcaa3e
RH
65TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66TCGv_i64 cpu_exclusive_addr;
67TCGv_i64 cpu_exclusive_val;
ad69471c 68
022c62cb 69#include "exec/gen-icount.h"
2e70f6ef 70
308e5636 71static const char * const regnames[] =
155c3eac
FN
72 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
73 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
74
61adacc8
RH
75/* Function prototypes for gen_ functions calling Neon helpers. */
76typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
77 TCGv_i32, TCGv_i32);
c253dd78
PM
78/* Function prototypes for gen_ functions for fix point conversions */
79typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
61adacc8 80
b26eefb6
PB
81/* initialize TCG globals. */
82void arm_translate_init(void)
83{
155c3eac
FN
84 int i;
85
155c3eac 86 for (i = 0; i < 16; i++) {
e1ccc054 87 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
0ecb72a5 88 offsetof(CPUARMState, regs[i]),
155c3eac
FN
89 regnames[i]);
90 }
e1ccc054
RH
91 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
92 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
93 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
94 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
66c374de 95
e1ccc054 96 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 97 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
e1ccc054 98 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 99 offsetof(CPUARMState, exclusive_val), "exclusive_val");
155c3eac 100
14ade10f 101 a64_translate_init();
b26eefb6
PB
102}
103
9bb6558a
PM
104/* Flags for the disas_set_da_iss info argument:
105 * lower bits hold the Rt register number, higher bits are flags.
106 */
107typedef enum ISSInfo {
108 ISSNone = 0,
109 ISSRegMask = 0x1f,
110 ISSInvalid = (1 << 5),
111 ISSIsAcqRel = (1 << 6),
112 ISSIsWrite = (1 << 7),
113 ISSIs16Bit = (1 << 8),
114} ISSInfo;
115
116/* Save the syndrome information for a Data Abort */
14776ab5 117static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
9bb6558a
PM
118{
119 uint32_t syn;
120 int sas = memop & MO_SIZE;
121 bool sse = memop & MO_SIGN;
122 bool is_acqrel = issinfo & ISSIsAcqRel;
123 bool is_write = issinfo & ISSIsWrite;
124 bool is_16bit = issinfo & ISSIs16Bit;
125 int srt = issinfo & ISSRegMask;
126
127 if (issinfo & ISSInvalid) {
128 /* Some callsites want to conditionally provide ISS info,
129 * eg "only if this was not a writeback"
130 */
131 return;
132 }
133
134 if (srt == 15) {
135 /* For AArch32, insns where the src/dest is R15 never generate
136 * ISS information. Catching that here saves checking at all
137 * the call sites.
138 */
139 return;
140 }
141
142 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
143 0, 0, 0, is_write, 0, is_16bit);
144 disas_set_insn_syndrome(s, syn);
145}
146
8bd5c820 147static inline int get_a32_user_mem_index(DisasContext *s)
579d21cc 148{
8bd5c820 149 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
579d21cc
PM
150 * insns:
151 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
152 * otherwise, access as if at PL0.
153 */
154 switch (s->mmu_idx) {
155 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
156 case ARMMMUIdx_S12NSE0:
157 case ARMMMUIdx_S12NSE1:
8bd5c820 158 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
579d21cc
PM
159 case ARMMMUIdx_S1E3:
160 case ARMMMUIdx_S1SE0:
161 case ARMMMUIdx_S1SE1:
8bd5c820 162 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
e7b921c2
PM
163 case ARMMMUIdx_MUser:
164 case ARMMMUIdx_MPriv:
165 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
62593718
PM
166 case ARMMMUIdx_MUserNegPri:
167 case ARMMMUIdx_MPrivNegPri:
168 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
b9f587d6
PM
169 case ARMMMUIdx_MSUser:
170 case ARMMMUIdx_MSPriv:
b9f587d6 171 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
62593718
PM
172 case ARMMMUIdx_MSUserNegPri:
173 case ARMMMUIdx_MSPrivNegPri:
174 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
579d21cc
PM
175 case ARMMMUIdx_S2NS:
176 default:
177 g_assert_not_reached();
178 }
179}
180
39d5492a 181static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 182{
39d5492a 183 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
184 tcg_gen_ld_i32(tmp, cpu_env, offset);
185 return tmp;
186}
187
0ecb72a5 188#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 189
39d5492a 190static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
191{
192 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 193 tcg_temp_free_i32(var);
d9ba4830
PB
194}
195
196#define store_cpu_field(var, name) \
0ecb72a5 197 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 198
fdbcf632
RH
199/* The architectural value of PC. */
200static uint32_t read_pc(DisasContext *s)
201{
202 return s->pc_curr + (s->thumb ? 4 : 8);
203}
204
b26eefb6 205/* Set a variable to the value of a CPU register. */
39d5492a 206static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
207{
208 if (reg == 15) {
fdbcf632 209 tcg_gen_movi_i32(var, read_pc(s));
b26eefb6 210 } else {
155c3eac 211 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
212 }
213}
214
215/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 216static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 217{
39d5492a 218 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
219 load_reg_var(s, tmp, reg);
220 return tmp;
221}
222
16e0d823
RH
223/*
224 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
225 * This is used for load/store for which use of PC implies (literal),
226 * or ADD that implies ADR.
227 */
228static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
229{
230 TCGv_i32 tmp = tcg_temp_new_i32();
231
232 if (reg == 15) {
233 tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
234 } else {
235 tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
236 }
237 return tmp;
238}
239
b26eefb6
PB
240/* Set a CPU register. The source must be a temporary and will be
241 marked as dead. */
39d5492a 242static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
243{
244 if (reg == 15) {
9b6a3ea7
PM
245 /* In Thumb mode, we must ignore bit 0.
246 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
247 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
248 * We choose to ignore [1:0] in ARM mode for all architecture versions.
249 */
250 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
dcba3a8d 251 s->base.is_jmp = DISAS_JUMP;
b26eefb6 252 }
155c3eac 253 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 254 tcg_temp_free_i32(var);
b26eefb6
PB
255}
256
55203189
PM
257/*
258 * Variant of store_reg which applies v8M stack-limit checks before updating
259 * SP. If the check fails this will result in an exception being taken.
260 * We disable the stack checks for CONFIG_USER_ONLY because we have
261 * no idea what the stack limits should be in that case.
262 * If stack checking is not being done this just acts like store_reg().
263 */
264static void store_sp_checked(DisasContext *s, TCGv_i32 var)
265{
266#ifndef CONFIG_USER_ONLY
267 if (s->v8m_stackcheck) {
268 gen_helper_v8m_stackcheck(cpu_env, var);
269 }
270#endif
271 store_reg(s, 13, var);
272}
273
b26eefb6 274/* Value extensions. */
86831435
PB
275#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
276#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
277#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
278#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
279
1497c961
PB
280#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
281#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 282
b26eefb6 283
39d5492a 284static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 285{
39d5492a 286 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 287 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
288 tcg_temp_free_i32(tmp_mask);
289}
d9ba4830
PB
290/* Set NZCV flags from the high 4 bits of var. */
291#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
292
d4a2dc67 293static void gen_exception_internal(int excp)
d9ba4830 294{
d4a2dc67
PM
295 TCGv_i32 tcg_excp = tcg_const_i32(excp);
296
297 assert(excp_is_internal(excp));
298 gen_helper_exception_internal(cpu_env, tcg_excp);
299 tcg_temp_free_i32(tcg_excp);
300}
301
50225ad0
PM
302static void gen_step_complete_exception(DisasContext *s)
303{
304 /* We just completed step of an insn. Move from Active-not-pending
305 * to Active-pending, and then also take the swstep exception.
306 * This corresponds to making the (IMPDEF) choice to prioritize
307 * swstep exceptions over asynchronous exceptions taken to an exception
308 * level where debug is disabled. This choice has the advantage that
309 * we do not need to maintain internal state corresponding to the
310 * ISV/EX syndrome bits between completion of the step and generation
311 * of the exception, and our syndrome information is always correct.
312 */
313 gen_ss_advance(s);
c1d5f50f 314 gen_swstep_exception(s, 1, s->is_ldex);
dcba3a8d 315 s->base.is_jmp = DISAS_NORETURN;
50225ad0
PM
316}
317
5425415e
PM
318static void gen_singlestep_exception(DisasContext *s)
319{
320 /* Generate the right kind of exception for singlestep, which is
321 * either the architectural singlestep or EXCP_DEBUG for QEMU's
322 * gdb singlestepping.
323 */
324 if (s->ss_active) {
325 gen_step_complete_exception(s);
326 } else {
327 gen_exception_internal(EXCP_DEBUG);
328 }
329}
330
b636649f
PM
331static inline bool is_singlestepping(DisasContext *s)
332{
333 /* Return true if we are singlestepping either because of
334 * architectural singlestep or QEMU gdbstub singlestep. This does
335 * not include the command line '-singlestep' mode which is rather
336 * misnamed as it only means "one instruction per TB" and doesn't
337 * affect the code we generate.
338 */
dcba3a8d 339 return s->base.singlestep_enabled || s->ss_active;
b636649f
PM
340}
341
39d5492a 342static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 343{
39d5492a
PM
344 TCGv_i32 tmp1 = tcg_temp_new_i32();
345 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
346 tcg_gen_ext16s_i32(tmp1, a);
347 tcg_gen_ext16s_i32(tmp2, b);
3670669c 348 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 349 tcg_temp_free_i32(tmp2);
3670669c
PB
350 tcg_gen_sari_i32(a, a, 16);
351 tcg_gen_sari_i32(b, b, 16);
352 tcg_gen_mul_i32(b, b, a);
353 tcg_gen_mov_i32(a, tmp1);
7d1b0095 354 tcg_temp_free_i32(tmp1);
3670669c
PB
355}
356
357/* Byteswap each halfword. */
39d5492a 358static void gen_rev16(TCGv_i32 var)
3670669c 359{
39d5492a 360 TCGv_i32 tmp = tcg_temp_new_i32();
68cedf73 361 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
3670669c 362 tcg_gen_shri_i32(tmp, var, 8);
68cedf73
AJ
363 tcg_gen_and_i32(tmp, tmp, mask);
364 tcg_gen_and_i32(var, var, mask);
3670669c 365 tcg_gen_shli_i32(var, var, 8);
3670669c 366 tcg_gen_or_i32(var, var, tmp);
68cedf73 367 tcg_temp_free_i32(mask);
7d1b0095 368 tcg_temp_free_i32(tmp);
3670669c
PB
369}
370
371/* Byteswap low halfword and sign extend. */
39d5492a 372static void gen_revsh(TCGv_i32 var)
3670669c 373{
1a855029
AJ
374 tcg_gen_ext16u_i32(var, var);
375 tcg_gen_bswap16_i32(var, var);
376 tcg_gen_ext16s_i32(var, var);
3670669c
PB
377}
378
5e3f878a 379/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 380static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 381{
39d5492a
PM
382 TCGv_i32 lo = tcg_temp_new_i32();
383 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 384 TCGv_i64 ret;
5e3f878a 385
831d7fe8 386 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 387 tcg_temp_free_i32(a);
7d1b0095 388 tcg_temp_free_i32(b);
831d7fe8
RH
389
390 ret = tcg_temp_new_i64();
391 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
392 tcg_temp_free_i32(lo);
393 tcg_temp_free_i32(hi);
831d7fe8
RH
394
395 return ret;
5e3f878a
PB
396}
397
39d5492a 398static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 399{
39d5492a
PM
400 TCGv_i32 lo = tcg_temp_new_i32();
401 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 402 TCGv_i64 ret;
5e3f878a 403
831d7fe8 404 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 405 tcg_temp_free_i32(a);
7d1b0095 406 tcg_temp_free_i32(b);
831d7fe8
RH
407
408 ret = tcg_temp_new_i64();
409 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
410 tcg_temp_free_i32(lo);
411 tcg_temp_free_i32(hi);
831d7fe8
RH
412
413 return ret;
5e3f878a
PB
414}
415
8f01245e 416/* Swap low and high halfwords. */
39d5492a 417static void gen_swap_half(TCGv_i32 var)
8f01245e 418{
adefba76 419 tcg_gen_rotri_i32(var, var, 16);
8f01245e
PB
420}
421
b26eefb6
PB
422/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
423 tmp = (t0 ^ t1) & 0x8000;
424 t0 &= ~0x8000;
425 t1 &= ~0x8000;
426 t0 = (t0 + t1) ^ tmp;
427 */
428
39d5492a 429static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 430{
39d5492a 431 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
432 tcg_gen_xor_i32(tmp, t0, t1);
433 tcg_gen_andi_i32(tmp, tmp, 0x8000);
434 tcg_gen_andi_i32(t0, t0, ~0x8000);
435 tcg_gen_andi_i32(t1, t1, ~0x8000);
436 tcg_gen_add_i32(t0, t0, t1);
437 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
438 tcg_temp_free_i32(tmp);
439 tcg_temp_free_i32(t1);
b26eefb6
PB
440}
441
b26eefb6 442/* Set N and Z flags from var. */
39d5492a 443static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 444{
66c374de
AJ
445 tcg_gen_mov_i32(cpu_NF, var);
446 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
447}
448
449/* T0 += T1 + CF. */
39d5492a 450static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 451{
396e467c 452 tcg_gen_add_i32(t0, t0, t1);
66c374de 453 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
454}
455
e9bb4aa9 456/* dest = T0 + T1 + CF. */
39d5492a 457static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 458{
e9bb4aa9 459 tcg_gen_add_i32(dest, t0, t1);
66c374de 460 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
461}
462
3670669c 463/* dest = T0 - T1 + CF - 1. */
39d5492a 464static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 465{
3670669c 466 tcg_gen_sub_i32(dest, t0, t1);
66c374de 467 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 468 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
469}
470
72485ec4 471/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 472static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 473{
39d5492a 474 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
475 tcg_gen_movi_i32(tmp, 0);
476 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 477 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 478 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
479 tcg_gen_xor_i32(tmp, t0, t1);
480 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
481 tcg_temp_free_i32(tmp);
482 tcg_gen_mov_i32(dest, cpu_NF);
483}
484
49b4c31e 485/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 486static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 487{
39d5492a 488 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
489 if (TCG_TARGET_HAS_add2_i32) {
490 tcg_gen_movi_i32(tmp, 0);
491 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 492 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
493 } else {
494 TCGv_i64 q0 = tcg_temp_new_i64();
495 TCGv_i64 q1 = tcg_temp_new_i64();
496 tcg_gen_extu_i32_i64(q0, t0);
497 tcg_gen_extu_i32_i64(q1, t1);
498 tcg_gen_add_i64(q0, q0, q1);
499 tcg_gen_extu_i32_i64(q1, cpu_CF);
500 tcg_gen_add_i64(q0, q0, q1);
501 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
502 tcg_temp_free_i64(q0);
503 tcg_temp_free_i64(q1);
504 }
505 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
506 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
507 tcg_gen_xor_i32(tmp, t0, t1);
508 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
509 tcg_temp_free_i32(tmp);
510 tcg_gen_mov_i32(dest, cpu_NF);
511}
512
72485ec4 513/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 514static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 515{
39d5492a 516 TCGv_i32 tmp;
72485ec4
AJ
517 tcg_gen_sub_i32(cpu_NF, t0, t1);
518 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
519 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
520 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
521 tmp = tcg_temp_new_i32();
522 tcg_gen_xor_i32(tmp, t0, t1);
523 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
524 tcg_temp_free_i32(tmp);
525 tcg_gen_mov_i32(dest, cpu_NF);
526}
527
e77f0832 528/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 529static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 530{
39d5492a 531 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
532 tcg_gen_not_i32(tmp, t1);
533 gen_adc_CC(dest, t0, tmp);
39d5492a 534 tcg_temp_free_i32(tmp);
2de68a49
RH
535}
536
365af80e 537#define GEN_SHIFT(name) \
39d5492a 538static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 539{ \
39d5492a 540 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
541 tmp1 = tcg_temp_new_i32(); \
542 tcg_gen_andi_i32(tmp1, t1, 0xff); \
543 tmp2 = tcg_const_i32(0); \
544 tmp3 = tcg_const_i32(0x1f); \
545 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
546 tcg_temp_free_i32(tmp3); \
547 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
548 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
549 tcg_temp_free_i32(tmp2); \
550 tcg_temp_free_i32(tmp1); \
551}
552GEN_SHIFT(shl)
553GEN_SHIFT(shr)
554#undef GEN_SHIFT
555
39d5492a 556static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 557{
39d5492a 558 TCGv_i32 tmp1, tmp2;
365af80e
AJ
559 tmp1 = tcg_temp_new_i32();
560 tcg_gen_andi_i32(tmp1, t1, 0xff);
561 tmp2 = tcg_const_i32(0x1f);
562 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
563 tcg_temp_free_i32(tmp2);
564 tcg_gen_sar_i32(dest, t0, tmp1);
565 tcg_temp_free_i32(tmp1);
566}
567
39d5492a 568static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 569{
191f4bfe 570 tcg_gen_extract_i32(cpu_CF, var, shift, 1);
9a119ff6 571}
b26eefb6 572
9a119ff6 573/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
574static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
575 int shift, int flags)
9a119ff6
PB
576{
577 switch (shiftop) {
578 case 0: /* LSL */
579 if (shift != 0) {
580 if (flags)
581 shifter_out_im(var, 32 - shift);
582 tcg_gen_shli_i32(var, var, shift);
583 }
584 break;
585 case 1: /* LSR */
586 if (shift == 0) {
587 if (flags) {
66c374de 588 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
589 }
590 tcg_gen_movi_i32(var, 0);
591 } else {
592 if (flags)
593 shifter_out_im(var, shift - 1);
594 tcg_gen_shri_i32(var, var, shift);
595 }
596 break;
597 case 2: /* ASR */
598 if (shift == 0)
599 shift = 32;
600 if (flags)
601 shifter_out_im(var, shift - 1);
602 if (shift == 32)
603 shift = 31;
604 tcg_gen_sari_i32(var, var, shift);
605 break;
606 case 3: /* ROR/RRX */
607 if (shift != 0) {
608 if (flags)
609 shifter_out_im(var, shift - 1);
f669df27 610 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 611 } else {
39d5492a 612 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 613 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
614 if (flags)
615 shifter_out_im(var, 0);
616 tcg_gen_shri_i32(var, var, 1);
b26eefb6 617 tcg_gen_or_i32(var, var, tmp);
7d1b0095 618 tcg_temp_free_i32(tmp);
b26eefb6
PB
619 }
620 }
621};
622
39d5492a
PM
623static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
624 TCGv_i32 shift, int flags)
8984bd2e
PB
625{
626 if (flags) {
627 switch (shiftop) {
9ef39277
BS
628 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
629 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
630 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
631 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
632 }
633 } else {
634 switch (shiftop) {
365af80e
AJ
635 case 0:
636 gen_shl(var, var, shift);
637 break;
638 case 1:
639 gen_shr(var, var, shift);
640 break;
641 case 2:
642 gen_sar(var, var, shift);
643 break;
f669df27
AJ
644 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
645 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
646 }
647 }
7d1b0095 648 tcg_temp_free_i32(shift);
8984bd2e
PB
649}
650
6ddbc6e4
PB
651#define PAS_OP(pfx) \
652 switch (op2) { \
653 case 0: gen_pas_helper(glue(pfx,add16)); break; \
654 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
655 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
656 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
657 case 4: gen_pas_helper(glue(pfx,add8)); break; \
658 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
659 }
39d5492a 660static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 661{
a7812ae4 662 TCGv_ptr tmp;
6ddbc6e4
PB
663
664 switch (op1) {
665#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
666 case 1:
a7812ae4 667 tmp = tcg_temp_new_ptr();
0ecb72a5 668 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 669 PAS_OP(s)
b75263d6 670 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
671 break;
672 case 5:
a7812ae4 673 tmp = tcg_temp_new_ptr();
0ecb72a5 674 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 675 PAS_OP(u)
b75263d6 676 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
677 break;
678#undef gen_pas_helper
679#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
680 case 2:
681 PAS_OP(q);
682 break;
683 case 3:
684 PAS_OP(sh);
685 break;
686 case 6:
687 PAS_OP(uq);
688 break;
689 case 7:
690 PAS_OP(uh);
691 break;
692#undef gen_pas_helper
693 }
694}
9ee6e8bb
PB
695#undef PAS_OP
696
6ddbc6e4
PB
697/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
698#define PAS_OP(pfx) \
ed89a2f1 699 switch (op1) { \
6ddbc6e4
PB
700 case 0: gen_pas_helper(glue(pfx,add8)); break; \
701 case 1: gen_pas_helper(glue(pfx,add16)); break; \
702 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
703 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
704 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
705 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
706 }
39d5492a 707static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 708{
a7812ae4 709 TCGv_ptr tmp;
6ddbc6e4 710
ed89a2f1 711 switch (op2) {
6ddbc6e4
PB
712#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
713 case 0:
a7812ae4 714 tmp = tcg_temp_new_ptr();
0ecb72a5 715 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 716 PAS_OP(s)
b75263d6 717 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
718 break;
719 case 4:
a7812ae4 720 tmp = tcg_temp_new_ptr();
0ecb72a5 721 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 722 PAS_OP(u)
b75263d6 723 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
724 break;
725#undef gen_pas_helper
726#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
727 case 1:
728 PAS_OP(q);
729 break;
730 case 2:
731 PAS_OP(sh);
732 break;
733 case 5:
734 PAS_OP(uq);
735 break;
736 case 6:
737 PAS_OP(uh);
738 break;
739#undef gen_pas_helper
740 }
741}
9ee6e8bb
PB
742#undef PAS_OP
743
39fb730a 744/*
6c2c63d3 745 * Generate a conditional based on ARM condition code cc.
39fb730a
AG
746 * This is common between ARM and Aarch64 targets.
747 */
6c2c63d3 748void arm_test_cc(DisasCompare *cmp, int cc)
d9ba4830 749{
6c2c63d3
RH
750 TCGv_i32 value;
751 TCGCond cond;
752 bool global = true;
d9ba4830 753
d9ba4830
PB
754 switch (cc) {
755 case 0: /* eq: Z */
d9ba4830 756 case 1: /* ne: !Z */
6c2c63d3
RH
757 cond = TCG_COND_EQ;
758 value = cpu_ZF;
d9ba4830 759 break;
6c2c63d3 760
d9ba4830 761 case 2: /* cs: C */
d9ba4830 762 case 3: /* cc: !C */
6c2c63d3
RH
763 cond = TCG_COND_NE;
764 value = cpu_CF;
d9ba4830 765 break;
6c2c63d3 766
d9ba4830 767 case 4: /* mi: N */
d9ba4830 768 case 5: /* pl: !N */
6c2c63d3
RH
769 cond = TCG_COND_LT;
770 value = cpu_NF;
d9ba4830 771 break;
6c2c63d3 772
d9ba4830 773 case 6: /* vs: V */
d9ba4830 774 case 7: /* vc: !V */
6c2c63d3
RH
775 cond = TCG_COND_LT;
776 value = cpu_VF;
d9ba4830 777 break;
6c2c63d3 778
d9ba4830 779 case 8: /* hi: C && !Z */
6c2c63d3
RH
780 case 9: /* ls: !C || Z -> !(C && !Z) */
781 cond = TCG_COND_NE;
782 value = tcg_temp_new_i32();
783 global = false;
784 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
785 ZF is non-zero for !Z; so AND the two subexpressions. */
786 tcg_gen_neg_i32(value, cpu_CF);
787 tcg_gen_and_i32(value, value, cpu_ZF);
d9ba4830 788 break;
6c2c63d3 789
d9ba4830 790 case 10: /* ge: N == V -> N ^ V == 0 */
d9ba4830 791 case 11: /* lt: N != V -> N ^ V != 0 */
6c2c63d3
RH
792 /* Since we're only interested in the sign bit, == 0 is >= 0. */
793 cond = TCG_COND_GE;
794 value = tcg_temp_new_i32();
795 global = false;
796 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
d9ba4830 797 break;
6c2c63d3 798
d9ba4830 799 case 12: /* gt: !Z && N == V */
d9ba4830 800 case 13: /* le: Z || N != V */
6c2c63d3
RH
801 cond = TCG_COND_NE;
802 value = tcg_temp_new_i32();
803 global = false;
804 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
805 * the sign bit then AND with ZF to yield the result. */
806 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
807 tcg_gen_sari_i32(value, value, 31);
808 tcg_gen_andc_i32(value, cpu_ZF, value);
d9ba4830 809 break;
6c2c63d3 810
9305eac0
RH
811 case 14: /* always */
812 case 15: /* always */
813 /* Use the ALWAYS condition, which will fold early.
814 * It doesn't matter what we use for the value. */
815 cond = TCG_COND_ALWAYS;
816 value = cpu_ZF;
817 goto no_invert;
818
d9ba4830
PB
819 default:
820 fprintf(stderr, "Bad condition code 0x%x\n", cc);
821 abort();
822 }
6c2c63d3
RH
823
824 if (cc & 1) {
825 cond = tcg_invert_cond(cond);
826 }
827
9305eac0 828 no_invert:
6c2c63d3
RH
829 cmp->cond = cond;
830 cmp->value = value;
831 cmp->value_global = global;
832}
833
834void arm_free_cc(DisasCompare *cmp)
835{
836 if (!cmp->value_global) {
837 tcg_temp_free_i32(cmp->value);
838 }
839}
840
841void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
842{
843 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
844}
845
846void arm_gen_test_cc(int cc, TCGLabel *label)
847{
848 DisasCompare cmp;
849 arm_test_cc(&cmp, cc);
850 arm_jump_cc(&cmp, label);
851 arm_free_cc(&cmp);
d9ba4830 852}
2c0262af 853
4d5e8c96
PM
854static inline void gen_set_condexec(DisasContext *s)
855{
856 if (s->condexec_mask) {
857 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
858 TCGv_i32 tmp = tcg_temp_new_i32();
859 tcg_gen_movi_i32(tmp, val);
860 store_cpu_field(tmp, condexec_bits);
861 }
862}
863
864static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
865{
866 tcg_gen_movi_i32(cpu_R[15], val);
867}
868
d9ba4830
PB
869/* Set PC and Thumb state from an immediate address. */
870static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 871{
39d5492a 872 TCGv_i32 tmp;
99c475ab 873
dcba3a8d 874 s->base.is_jmp = DISAS_JUMP;
d9ba4830 875 if (s->thumb != (addr & 1)) {
7d1b0095 876 tmp = tcg_temp_new_i32();
d9ba4830 877 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 878 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 879 tcg_temp_free_i32(tmp);
d9ba4830 880 }
155c3eac 881 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
882}
883
884/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 885static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 886{
dcba3a8d 887 s->base.is_jmp = DISAS_JUMP;
155c3eac
FN
888 tcg_gen_andi_i32(cpu_R[15], var, ~1);
889 tcg_gen_andi_i32(var, var, 1);
890 store_cpu_field(var, thumb);
d9ba4830
PB
891}
892
5e5584c8
PM
893/*
894 * Set PC and Thumb state from var. var is marked as dead.
3bb8a96f
PM
895 * For M-profile CPUs, include logic to detect exception-return
896 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
897 * and BX reg, and no others, and happens only for code in Handler mode.
5e5584c8
PM
898 * The Security Extension also requires us to check for the FNC_RETURN
899 * which signals a function return from non-secure state; this can happen
900 * in both Handler and Thread mode.
901 * To avoid having to do multiple comparisons in inline generated code,
902 * we make the check we do here loose, so it will match for EXC_RETURN
903 * in Thread mode. For system emulation do_v7m_exception_exit() checks
904 * for these spurious cases and returns without doing anything (giving
905 * the same behaviour as for a branch to a non-magic address).
906 *
907 * In linux-user mode it is unclear what the right behaviour for an
908 * attempted FNC_RETURN should be, because in real hardware this will go
909 * directly to Secure code (ie not the Linux kernel) which will then treat
910 * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
911 * attempt behave the way it would on a CPU without the security extension,
912 * which is to say "like a normal branch". That means we can simply treat
913 * all branches as normal with no magic address behaviour.
3bb8a96f
PM
914 */
915static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
916{
917 /* Generate the same code here as for a simple bx, but flag via
dcba3a8d 918 * s->base.is_jmp that we need to do the rest of the work later.
3bb8a96f
PM
919 */
920 gen_bx(s, var);
5e5584c8 921#ifndef CONFIG_USER_ONLY
d02a8698
PM
922 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
923 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
dcba3a8d 924 s->base.is_jmp = DISAS_BX_EXCRET;
3bb8a96f 925 }
5e5584c8 926#endif
3bb8a96f
PM
927}
928
929static inline void gen_bx_excret_final_code(DisasContext *s)
930{
931 /* Generate the code to finish possible exception return and end the TB */
932 TCGLabel *excret_label = gen_new_label();
d02a8698
PM
933 uint32_t min_magic;
934
935 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
936 /* Covers FNC_RETURN and EXC_RETURN magic */
937 min_magic = FNC_RETURN_MIN_MAGIC;
938 } else {
939 /* EXC_RETURN magic only */
940 min_magic = EXC_RETURN_MIN_MAGIC;
941 }
3bb8a96f
PM
942
943 /* Is the new PC value in the magic range indicating exception return? */
d02a8698 944 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
3bb8a96f
PM
945 /* No: end the TB as we would for a DISAS_JMP */
946 if (is_singlestepping(s)) {
947 gen_singlestep_exception(s);
948 } else {
07ea28b4 949 tcg_gen_exit_tb(NULL, 0);
3bb8a96f
PM
950 }
951 gen_set_label(excret_label);
952 /* Yes: this is an exception return.
953 * At this point in runtime env->regs[15] and env->thumb will hold
954 * the exception-return magic number, which do_v7m_exception_exit()
955 * will read. Nothing else will be able to see those values because
956 * the cpu-exec main loop guarantees that we will always go straight
957 * from raising the exception to the exception-handling code.
958 *
959 * gen_ss_advance(s) does nothing on M profile currently but
960 * calling it is conceptually the right thing as we have executed
961 * this instruction (compare SWI, HVC, SMC handling).
962 */
963 gen_ss_advance(s);
964 gen_exception_internal(EXCP_EXCEPTION_EXIT);
965}
966
fb602cb7
PM
967static inline void gen_bxns(DisasContext *s, int rm)
968{
969 TCGv_i32 var = load_reg(s, rm);
970
971 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
972 * we need to sync state before calling it, but:
973 * - we don't need to do gen_set_pc_im() because the bxns helper will
974 * always set the PC itself
975 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
976 * unless it's outside an IT block or the last insn in an IT block,
977 * so we know that condexec == 0 (already set at the top of the TB)
978 * is correct in the non-UNPREDICTABLE cases, and we can choose
979 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
980 */
981 gen_helper_v7m_bxns(cpu_env, var);
982 tcg_temp_free_i32(var);
ef475b5d 983 s->base.is_jmp = DISAS_EXIT;
fb602cb7
PM
984}
985
3e3fa230
PM
986static inline void gen_blxns(DisasContext *s, int rm)
987{
988 TCGv_i32 var = load_reg(s, rm);
989
990 /* We don't need to sync condexec state, for the same reason as bxns.
991 * We do however need to set the PC, because the blxns helper reads it.
992 * The blxns helper may throw an exception.
993 */
a0415916 994 gen_set_pc_im(s, s->base.pc_next);
3e3fa230
PM
995 gen_helper_v7m_blxns(cpu_env, var);
996 tcg_temp_free_i32(var);
997 s->base.is_jmp = DISAS_EXIT;
998}
999
21aeb343
JR
1000/* Variant of store_reg which uses branch&exchange logic when storing
1001 to r15 in ARM architecture v7 and above. The source must be a temporary
1002 and will be marked as dead. */
7dcc1f89 1003static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
1004{
1005 if (reg == 15 && ENABLE_ARCH_7) {
1006 gen_bx(s, var);
1007 } else {
1008 store_reg(s, reg, var);
1009 }
1010}
1011
be5e7a76
DES
1012/* Variant of store_reg which uses branch&exchange logic when storing
1013 * to r15 in ARM architecture v5T and above. This is used for storing
1014 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1015 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1016static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1017{
1018 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1019 gen_bx_excret(s, var);
be5e7a76
DES
1020 } else {
1021 store_reg(s, reg, var);
1022 }
1023}
1024
e334bd31
PB
1025#ifdef CONFIG_USER_ONLY
1026#define IS_USER_ONLY 1
1027#else
1028#define IS_USER_ONLY 0
1029#endif
1030
08307563
PM
1031/* Abstractions of "generate code to do a guest load/store for
1032 * AArch32", where a vaddr is always 32 bits (and is zero
1033 * extended if we're a 64 bit core) and data is also
1034 * 32 bits unless specifically doing a 64 bit access.
1035 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 1036 * that the address argument is TCGv_i32 rather than TCGv.
08307563 1037 */
08307563 1038
14776ab5 1039static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
08307563 1040{
7f5616f5
RH
1041 TCGv addr = tcg_temp_new();
1042 tcg_gen_extu_i32_tl(addr, a32);
1043
e334bd31 1044 /* Not needed for user-mode BE32, where we use MO_BE instead. */
7f5616f5
RH
1045 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1046 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
e334bd31 1047 }
7f5616f5 1048 return addr;
08307563
PM
1049}
1050
7f5616f5 1051static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
14776ab5 1052 int index, MemOp opc)
08307563 1053{
2aeba0d0
JS
1054 TCGv addr;
1055
1056 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1057 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1058 opc |= MO_ALIGN;
1059 }
1060
1061 addr = gen_aa32_addr(s, a32, opc);
7f5616f5
RH
1062 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1063 tcg_temp_free(addr);
08307563
PM
1064}
1065
7f5616f5 1066static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
14776ab5 1067 int index, MemOp opc)
7f5616f5 1068{
2aeba0d0
JS
1069 TCGv addr;
1070
1071 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1072 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1073 opc |= MO_ALIGN;
1074 }
1075
1076 addr = gen_aa32_addr(s, a32, opc);
7f5616f5
RH
1077 tcg_gen_qemu_st_i32(val, addr, index, opc);
1078 tcg_temp_free(addr);
1079}
08307563 1080
7f5616f5 1081#define DO_GEN_LD(SUFF, OPC) \
12dcc321 1082static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
7f5616f5 1083 TCGv_i32 a32, int index) \
08307563 1084{ \
7f5616f5 1085 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
9bb6558a
PM
1086} \
1087static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1088 TCGv_i32 val, \
1089 TCGv_i32 a32, int index, \
1090 ISSInfo issinfo) \
1091{ \
1092 gen_aa32_ld##SUFF(s, val, a32, index); \
1093 disas_set_da_iss(s, OPC, issinfo); \
08307563
PM
1094}
1095
7f5616f5 1096#define DO_GEN_ST(SUFF, OPC) \
12dcc321 1097static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
7f5616f5 1098 TCGv_i32 a32, int index) \
08307563 1099{ \
7f5616f5 1100 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
9bb6558a
PM
1101} \
1102static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1103 TCGv_i32 val, \
1104 TCGv_i32 a32, int index, \
1105 ISSInfo issinfo) \
1106{ \
1107 gen_aa32_st##SUFF(s, val, a32, index); \
1108 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
08307563
PM
1109}
1110
7f5616f5 1111static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
08307563 1112{
e334bd31
PB
1113 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1114 if (!IS_USER_ONLY && s->sctlr_b) {
1115 tcg_gen_rotri_i64(val, val, 32);
1116 }
08307563
PM
1117}
1118
7f5616f5 1119static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
14776ab5 1120 int index, MemOp opc)
08307563 1121{
7f5616f5
RH
1122 TCGv addr = gen_aa32_addr(s, a32, opc);
1123 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1124 gen_aa32_frob64(s, val);
1125 tcg_temp_free(addr);
1126}
1127
1128static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1129 TCGv_i32 a32, int index)
1130{
1131 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1132}
1133
1134static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
14776ab5 1135 int index, MemOp opc)
7f5616f5
RH
1136{
1137 TCGv addr = gen_aa32_addr(s, a32, opc);
e334bd31
PB
1138
1139 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1140 if (!IS_USER_ONLY && s->sctlr_b) {
7f5616f5 1141 TCGv_i64 tmp = tcg_temp_new_i64();
e334bd31 1142 tcg_gen_rotri_i64(tmp, val, 32);
7f5616f5
RH
1143 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1144 tcg_temp_free_i64(tmp);
e334bd31 1145 } else {
7f5616f5 1146 tcg_gen_qemu_st_i64(val, addr, index, opc);
e334bd31 1147 }
7f5616f5 1148 tcg_temp_free(addr);
08307563
PM
1149}
1150
7f5616f5
RH
1151static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1152 TCGv_i32 a32, int index)
1153{
1154 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1155}
08307563 1156
7f5616f5
RH
1157DO_GEN_LD(8s, MO_SB)
1158DO_GEN_LD(8u, MO_UB)
1159DO_GEN_LD(16s, MO_SW)
1160DO_GEN_LD(16u, MO_UW)
1161DO_GEN_LD(32u, MO_UL)
7f5616f5
RH
1162DO_GEN_ST(8, MO_UB)
1163DO_GEN_ST(16, MO_UW)
1164DO_GEN_ST(32, MO_UL)
08307563 1165
37e6456e
PM
1166static inline void gen_hvc(DisasContext *s, int imm16)
1167{
1168 /* The pre HVC helper handles cases when HVC gets trapped
1169 * as an undefined insn by runtime configuration (ie before
1170 * the insn really executes).
1171 */
43722a6d 1172 gen_set_pc_im(s, s->pc_curr);
37e6456e
PM
1173 gen_helper_pre_hvc(cpu_env);
1174 /* Otherwise we will treat this as a real exception which
1175 * happens after execution of the insn. (The distinction matters
1176 * for the PC value reported to the exception handler and also
1177 * for single stepping.)
1178 */
1179 s->svc_imm = imm16;
a0415916 1180 gen_set_pc_im(s, s->base.pc_next);
dcba3a8d 1181 s->base.is_jmp = DISAS_HVC;
37e6456e
PM
1182}
1183
1184static inline void gen_smc(DisasContext *s)
1185{
1186 /* As with HVC, we may take an exception either before or after
1187 * the insn executes.
1188 */
1189 TCGv_i32 tmp;
1190
43722a6d 1191 gen_set_pc_im(s, s->pc_curr);
37e6456e
PM
1192 tmp = tcg_const_i32(syn_aa32_smc());
1193 gen_helper_pre_smc(cpu_env, tmp);
1194 tcg_temp_free_i32(tmp);
a0415916 1195 gen_set_pc_im(s, s->base.pc_next);
dcba3a8d 1196 s->base.is_jmp = DISAS_SMC;
37e6456e
PM
1197}
1198
aee828e7 1199static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
d4a2dc67
PM
1200{
1201 gen_set_condexec(s);
aee828e7 1202 gen_set_pc_im(s, pc);
d4a2dc67 1203 gen_exception_internal(excp);
dcba3a8d 1204 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1205}
1206
a767fac8 1207static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
73710361 1208 int syn, uint32_t target_el)
d4a2dc67
PM
1209{
1210 gen_set_condexec(s);
a767fac8 1211 gen_set_pc_im(s, pc);
73710361 1212 gen_exception(excp, syn, target_el);
dcba3a8d 1213 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1214}
1215
06bcbda3 1216static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
c900a2e6
PM
1217{
1218 TCGv_i32 tcg_syn;
1219
1220 gen_set_condexec(s);
06bcbda3 1221 gen_set_pc_im(s, s->pc_curr);
c900a2e6
PM
1222 tcg_syn = tcg_const_i32(syn);
1223 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1224 tcg_temp_free_i32(tcg_syn);
1225 s->base.is_jmp = DISAS_NORETURN;
1226}
1227
1ce21ba1
RH
1228static void unallocated_encoding(DisasContext *s)
1229{
1230 /* Unallocated and reserved encodings are uncategorized */
1231 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
1232 default_exception_el(s));
1233}
1234
b5ff1b31
FB
1235/* Force a TB lookup after an instruction that changes the CPU state. */
1236static inline void gen_lookup_tb(DisasContext *s)
1237{
a0415916 1238 tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
dcba3a8d 1239 s->base.is_jmp = DISAS_EXIT;
b5ff1b31
FB
1240}
1241
19a6e31c
PM
1242static inline void gen_hlt(DisasContext *s, int imm)
1243{
1244 /* HLT. This has two purposes.
1245 * Architecturally, it is an external halting debug instruction.
1246 * Since QEMU doesn't implement external debug, we treat this as
1247 * it is required for halting debug disabled: it will UNDEF.
1248 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1249 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1250 * must trigger semihosting even for ARMv7 and earlier, where
1251 * HLT was an undefined encoding.
1252 * In system mode, we don't allow userspace access to
1253 * semihosting, to provide some semblance of security
1254 * (and for consistency with our 32-bit semihosting).
1255 */
1256 if (semihosting_enabled() &&
1257#ifndef CONFIG_USER_ONLY
1258 s->current_el != 0 &&
1259#endif
1260 (imm == (s->thumb ? 0x3c : 0xf000))) {
aee828e7 1261 gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
19a6e31c
PM
1262 return;
1263 }
1264
1ce21ba1 1265 unallocated_encoding(s);
19a6e31c
PM
1266}
1267
b0109805 1268static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1269 TCGv_i32 var)
2c0262af 1270{
1e8d4eec 1271 int val, rm, shift, shiftop;
39d5492a 1272 TCGv_i32 offset;
2c0262af
FB
1273
1274 if (!(insn & (1 << 25))) {
1275 /* immediate */
1276 val = insn & 0xfff;
1277 if (!(insn & (1 << 23)))
1278 val = -val;
537730b9 1279 if (val != 0)
b0109805 1280 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1281 } else {
1282 /* shift/register */
1283 rm = (insn) & 0xf;
1284 shift = (insn >> 7) & 0x1f;
1e8d4eec 1285 shiftop = (insn >> 5) & 3;
b26eefb6 1286 offset = load_reg(s, rm);
9a119ff6 1287 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1288 if (!(insn & (1 << 23)))
b0109805 1289 tcg_gen_sub_i32(var, var, offset);
2c0262af 1290 else
b0109805 1291 tcg_gen_add_i32(var, var, offset);
7d1b0095 1292 tcg_temp_free_i32(offset);
2c0262af
FB
1293 }
1294}
1295
191f9a93 1296static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1297 int extra, TCGv_i32 var)
2c0262af
FB
1298{
1299 int val, rm;
39d5492a 1300 TCGv_i32 offset;
3b46e624 1301
2c0262af
FB
1302 if (insn & (1 << 22)) {
1303 /* immediate */
1304 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1305 if (!(insn & (1 << 23)))
1306 val = -val;
18acad92 1307 val += extra;
537730b9 1308 if (val != 0)
b0109805 1309 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1310 } else {
1311 /* register */
191f9a93 1312 if (extra)
b0109805 1313 tcg_gen_addi_i32(var, var, extra);
2c0262af 1314 rm = (insn) & 0xf;
b26eefb6 1315 offset = load_reg(s, rm);
2c0262af 1316 if (!(insn & (1 << 23)))
b0109805 1317 tcg_gen_sub_i32(var, var, offset);
2c0262af 1318 else
b0109805 1319 tcg_gen_add_i32(var, var, offset);
7d1b0095 1320 tcg_temp_free_i32(offset);
2c0262af
FB
1321 }
1322}
1323
5aaebd13
PM
1324static TCGv_ptr get_fpstatus_ptr(int neon)
1325{
1326 TCGv_ptr statusptr = tcg_temp_new_ptr();
1327 int offset;
1328 if (neon) {
0ecb72a5 1329 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1330 } else {
0ecb72a5 1331 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1332 }
1333 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1334 return statusptr;
1335}
1336
c39c2b90 1337static inline long vfp_reg_offset(bool dp, unsigned reg)
8e96005d 1338{
9a2b5256 1339 if (dp) {
c39c2b90 1340 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
8e96005d 1341 } else {
c39c2b90 1342 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
9a2b5256
RH
1343 if (reg & 1) {
1344 ofs += offsetof(CPU_DoubleU, l.upper);
1345 } else {
1346 ofs += offsetof(CPU_DoubleU, l.lower);
1347 }
1348 return ofs;
8e96005d
FB
1349 }
1350}
9ee6e8bb
PB
1351
1352/* Return the offset of a 32-bit piece of a NEON register.
1353 zero is the least significant end of the register. */
1354static inline long
1355neon_reg_offset (int reg, int n)
1356{
1357 int sreg;
1358 sreg = reg * 2 + n;
1359 return vfp_reg_offset(0, sreg);
1360}
1361
32f91fb7
RH
1362/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
1363 * where 0 is the least significant end of the register.
1364 */
1365static inline long
14776ab5 1366neon_element_offset(int reg, int element, MemOp size)
32f91fb7
RH
1367{
1368 int element_size = 1 << size;
1369 int ofs = element * element_size;
1370#ifdef HOST_WORDS_BIGENDIAN
1371 /* Calculate the offset assuming fully little-endian,
1372 * then XOR to account for the order of the 8-byte units.
1373 */
1374 if (element_size < 8) {
1375 ofs ^= 8 - element_size;
1376 }
1377#endif
1378 return neon_reg_offset(reg, 0) + ofs;
1379}
1380
39d5492a 1381static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1382{
39d5492a 1383 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1384 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1385 return tmp;
1386}
1387
14776ab5 1388static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
2d6ac920
RH
1389{
1390 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1391
1392 switch (mop) {
1393 case MO_UB:
1394 tcg_gen_ld8u_i32(var, cpu_env, offset);
1395 break;
1396 case MO_UW:
1397 tcg_gen_ld16u_i32(var, cpu_env, offset);
1398 break;
1399 case MO_UL:
1400 tcg_gen_ld_i32(var, cpu_env, offset);
1401 break;
1402 default:
1403 g_assert_not_reached();
1404 }
1405}
1406
14776ab5 1407static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop)
ac55d007
RH
1408{
1409 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1410
1411 switch (mop) {
1412 case MO_UB:
1413 tcg_gen_ld8u_i64(var, cpu_env, offset);
1414 break;
1415 case MO_UW:
1416 tcg_gen_ld16u_i64(var, cpu_env, offset);
1417 break;
1418 case MO_UL:
1419 tcg_gen_ld32u_i64(var, cpu_env, offset);
1420 break;
1421 case MO_Q:
1422 tcg_gen_ld_i64(var, cpu_env, offset);
1423 break;
1424 default:
1425 g_assert_not_reached();
1426 }
1427}
1428
39d5492a 1429static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1430{
1431 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1432 tcg_temp_free_i32(var);
8f8e3aa4
PB
1433}
1434
14776ab5 1435static void neon_store_element(int reg, int ele, MemOp size, TCGv_i32 var)
2d6ac920
RH
1436{
1437 long offset = neon_element_offset(reg, ele, size);
1438
1439 switch (size) {
1440 case MO_8:
1441 tcg_gen_st8_i32(var, cpu_env, offset);
1442 break;
1443 case MO_16:
1444 tcg_gen_st16_i32(var, cpu_env, offset);
1445 break;
1446 case MO_32:
1447 tcg_gen_st_i32(var, cpu_env, offset);
1448 break;
1449 default:
1450 g_assert_not_reached();
1451 }
1452}
1453
14776ab5 1454static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var)
ac55d007
RH
1455{
1456 long offset = neon_element_offset(reg, ele, size);
1457
1458 switch (size) {
1459 case MO_8:
1460 tcg_gen_st8_i64(var, cpu_env, offset);
1461 break;
1462 case MO_16:
1463 tcg_gen_st16_i64(var, cpu_env, offset);
1464 break;
1465 case MO_32:
1466 tcg_gen_st32_i64(var, cpu_env, offset);
1467 break;
1468 case MO_64:
1469 tcg_gen_st_i64(var, cpu_env, offset);
1470 break;
1471 default:
1472 g_assert_not_reached();
1473 }
1474}
1475
a7812ae4 1476static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1477{
1478 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1479}
1480
a7812ae4 1481static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1482{
1483 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1484}
1485
160f3b64
PM
1486static inline void neon_load_reg32(TCGv_i32 var, int reg)
1487{
1488 tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
1489}
1490
1491static inline void neon_store_reg32(TCGv_i32 var, int reg)
1492{
1493 tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
1494}
1495
1a66ac61
RH
1496static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1497{
1498 TCGv_ptr ret = tcg_temp_new_ptr();
1499 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1500 return ret;
1501}
1502
d00584b7 1503#define ARM_CP_RW_BIT (1 << 20)
18c9b560 1504
78e138bc
PM
1505/* Include the VFP decoder */
1506#include "translate-vfp.inc.c"
1507
a7812ae4 1508static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1509{
0ecb72a5 1510 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1511}
1512
a7812ae4 1513static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1514{
0ecb72a5 1515 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1516}
1517
39d5492a 1518static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1519{
39d5492a 1520 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1521 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1522 return var;
e677137d
PB
1523}
1524
39d5492a 1525static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1526{
0ecb72a5 1527 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1528 tcg_temp_free_i32(var);
e677137d
PB
1529}
1530
1531static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1532{
1533 iwmmxt_store_reg(cpu_M0, rn);
1534}
1535
1536static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1537{
1538 iwmmxt_load_reg(cpu_M0, rn);
1539}
1540
1541static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1542{
1543 iwmmxt_load_reg(cpu_V1, rn);
1544 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1545}
1546
1547static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1548{
1549 iwmmxt_load_reg(cpu_V1, rn);
1550 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1551}
1552
1553static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1554{
1555 iwmmxt_load_reg(cpu_V1, rn);
1556 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1557}
1558
1559#define IWMMXT_OP(name) \
1560static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1561{ \
1562 iwmmxt_load_reg(cpu_V1, rn); \
1563 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1564}
1565
477955bd
PM
1566#define IWMMXT_OP_ENV(name) \
1567static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1568{ \
1569 iwmmxt_load_reg(cpu_V1, rn); \
1570 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1571}
1572
1573#define IWMMXT_OP_ENV_SIZE(name) \
1574IWMMXT_OP_ENV(name##b) \
1575IWMMXT_OP_ENV(name##w) \
1576IWMMXT_OP_ENV(name##l)
e677137d 1577
477955bd 1578#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1579static inline void gen_op_iwmmxt_##name##_M0(void) \
1580{ \
477955bd 1581 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1582}
1583
1584IWMMXT_OP(maddsq)
1585IWMMXT_OP(madduq)
1586IWMMXT_OP(sadb)
1587IWMMXT_OP(sadw)
1588IWMMXT_OP(mulslw)
1589IWMMXT_OP(mulshw)
1590IWMMXT_OP(mululw)
1591IWMMXT_OP(muluhw)
1592IWMMXT_OP(macsw)
1593IWMMXT_OP(macuw)
1594
477955bd
PM
1595IWMMXT_OP_ENV_SIZE(unpackl)
1596IWMMXT_OP_ENV_SIZE(unpackh)
1597
1598IWMMXT_OP_ENV1(unpacklub)
1599IWMMXT_OP_ENV1(unpackluw)
1600IWMMXT_OP_ENV1(unpacklul)
1601IWMMXT_OP_ENV1(unpackhub)
1602IWMMXT_OP_ENV1(unpackhuw)
1603IWMMXT_OP_ENV1(unpackhul)
1604IWMMXT_OP_ENV1(unpacklsb)
1605IWMMXT_OP_ENV1(unpacklsw)
1606IWMMXT_OP_ENV1(unpacklsl)
1607IWMMXT_OP_ENV1(unpackhsb)
1608IWMMXT_OP_ENV1(unpackhsw)
1609IWMMXT_OP_ENV1(unpackhsl)
1610
1611IWMMXT_OP_ENV_SIZE(cmpeq)
1612IWMMXT_OP_ENV_SIZE(cmpgtu)
1613IWMMXT_OP_ENV_SIZE(cmpgts)
1614
1615IWMMXT_OP_ENV_SIZE(mins)
1616IWMMXT_OP_ENV_SIZE(minu)
1617IWMMXT_OP_ENV_SIZE(maxs)
1618IWMMXT_OP_ENV_SIZE(maxu)
1619
1620IWMMXT_OP_ENV_SIZE(subn)
1621IWMMXT_OP_ENV_SIZE(addn)
1622IWMMXT_OP_ENV_SIZE(subu)
1623IWMMXT_OP_ENV_SIZE(addu)
1624IWMMXT_OP_ENV_SIZE(subs)
1625IWMMXT_OP_ENV_SIZE(adds)
1626
1627IWMMXT_OP_ENV(avgb0)
1628IWMMXT_OP_ENV(avgb1)
1629IWMMXT_OP_ENV(avgw0)
1630IWMMXT_OP_ENV(avgw1)
e677137d 1631
477955bd
PM
1632IWMMXT_OP_ENV(packuw)
1633IWMMXT_OP_ENV(packul)
1634IWMMXT_OP_ENV(packuq)
1635IWMMXT_OP_ENV(packsw)
1636IWMMXT_OP_ENV(packsl)
1637IWMMXT_OP_ENV(packsq)
e677137d 1638
e677137d
PB
1639static void gen_op_iwmmxt_set_mup(void)
1640{
39d5492a 1641 TCGv_i32 tmp;
e677137d
PB
1642 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1643 tcg_gen_ori_i32(tmp, tmp, 2);
1644 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1645}
1646
1647static void gen_op_iwmmxt_set_cup(void)
1648{
39d5492a 1649 TCGv_i32 tmp;
e677137d
PB
1650 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1651 tcg_gen_ori_i32(tmp, tmp, 1);
1652 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1653}
1654
1655static void gen_op_iwmmxt_setpsr_nz(void)
1656{
39d5492a 1657 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1658 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1659 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1660}
1661
1662static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1663{
1664 iwmmxt_load_reg(cpu_V1, rn);
86831435 1665 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1666 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1667}
1668
39d5492a
PM
1669static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1670 TCGv_i32 dest)
18c9b560
AZ
1671{
1672 int rd;
1673 uint32_t offset;
39d5492a 1674 TCGv_i32 tmp;
18c9b560
AZ
1675
1676 rd = (insn >> 16) & 0xf;
da6b5335 1677 tmp = load_reg(s, rd);
18c9b560
AZ
1678
1679 offset = (insn & 0xff) << ((insn >> 7) & 2);
1680 if (insn & (1 << 24)) {
1681 /* Pre indexed */
1682 if (insn & (1 << 23))
da6b5335 1683 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1684 else
da6b5335
FN
1685 tcg_gen_addi_i32(tmp, tmp, -offset);
1686 tcg_gen_mov_i32(dest, tmp);
18c9b560 1687 if (insn & (1 << 21))
da6b5335
FN
1688 store_reg(s, rd, tmp);
1689 else
7d1b0095 1690 tcg_temp_free_i32(tmp);
18c9b560
AZ
1691 } else if (insn & (1 << 21)) {
1692 /* Post indexed */
da6b5335 1693 tcg_gen_mov_i32(dest, tmp);
18c9b560 1694 if (insn & (1 << 23))
da6b5335 1695 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1696 else
da6b5335
FN
1697 tcg_gen_addi_i32(tmp, tmp, -offset);
1698 store_reg(s, rd, tmp);
18c9b560
AZ
1699 } else if (!(insn & (1 << 23)))
1700 return 1;
1701 return 0;
1702}
1703
39d5492a 1704static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1705{
1706 int rd = (insn >> 0) & 0xf;
39d5492a 1707 TCGv_i32 tmp;
18c9b560 1708
da6b5335
FN
1709 if (insn & (1 << 8)) {
1710 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1711 return 1;
da6b5335
FN
1712 } else {
1713 tmp = iwmmxt_load_creg(rd);
1714 }
1715 } else {
7d1b0095 1716 tmp = tcg_temp_new_i32();
da6b5335 1717 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1718 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1719 }
1720 tcg_gen_andi_i32(tmp, tmp, mask);
1721 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1722 tcg_temp_free_i32(tmp);
18c9b560
AZ
1723 return 0;
1724}
1725
a1c7273b 1726/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1727 (ie. an undefined instruction). */
7dcc1f89 1728static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1729{
1730 int rd, wrd;
1731 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1732 TCGv_i32 addr;
1733 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1734
1735 if ((insn & 0x0e000e00) == 0x0c000000) {
1736 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1737 wrd = insn & 0xf;
1738 rdlo = (insn >> 12) & 0xf;
1739 rdhi = (insn >> 16) & 0xf;
d00584b7 1740 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1741 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1742 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
664b7e3b 1743 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
d00584b7 1744 } else { /* TMCRR */
da6b5335
FN
1745 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1746 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1747 gen_op_iwmmxt_set_mup();
1748 }
1749 return 0;
1750 }
1751
1752 wrd = (insn >> 12) & 0xf;
7d1b0095 1753 addr = tcg_temp_new_i32();
da6b5335 1754 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1755 tcg_temp_free_i32(addr);
18c9b560 1756 return 1;
da6b5335 1757 }
18c9b560 1758 if (insn & ARM_CP_RW_BIT) {
d00584b7 1759 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1760 tmp = tcg_temp_new_i32();
12dcc321 1761 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1762 iwmmxt_store_creg(wrd, tmp);
18c9b560 1763 } else {
e677137d
PB
1764 i = 1;
1765 if (insn & (1 << 8)) {
d00584b7 1766 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1767 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1768 i = 0;
d00584b7 1769 } else { /* WLDRW wRd */
29531141 1770 tmp = tcg_temp_new_i32();
12dcc321 1771 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1772 }
1773 } else {
29531141 1774 tmp = tcg_temp_new_i32();
d00584b7 1775 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1776 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
d00584b7 1777 } else { /* WLDRB */
12dcc321 1778 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1779 }
1780 }
1781 if (i) {
1782 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1783 tcg_temp_free_i32(tmp);
e677137d 1784 }
18c9b560
AZ
1785 gen_op_iwmmxt_movq_wRn_M0(wrd);
1786 }
1787 } else {
d00584b7 1788 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1789 tmp = iwmmxt_load_creg(wrd);
12dcc321 1790 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1791 } else {
1792 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1793 tmp = tcg_temp_new_i32();
e677137d 1794 if (insn & (1 << 8)) {
d00584b7 1795 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1796 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
d00584b7 1797 } else { /* WSTRW wRd */
ecc7b3aa 1798 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1799 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1800 }
1801 } else {
d00584b7 1802 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1803 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1804 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
d00584b7 1805 } else { /* WSTRB */
ecc7b3aa 1806 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1807 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1808 }
1809 }
18c9b560 1810 }
29531141 1811 tcg_temp_free_i32(tmp);
18c9b560 1812 }
7d1b0095 1813 tcg_temp_free_i32(addr);
18c9b560
AZ
1814 return 0;
1815 }
1816
1817 if ((insn & 0x0f000000) != 0x0e000000)
1818 return 1;
1819
1820 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
d00584b7 1821 case 0x000: /* WOR */
18c9b560
AZ
1822 wrd = (insn >> 12) & 0xf;
1823 rd0 = (insn >> 0) & 0xf;
1824 rd1 = (insn >> 16) & 0xf;
1825 gen_op_iwmmxt_movq_M0_wRn(rd0);
1826 gen_op_iwmmxt_orq_M0_wRn(rd1);
1827 gen_op_iwmmxt_setpsr_nz();
1828 gen_op_iwmmxt_movq_wRn_M0(wrd);
1829 gen_op_iwmmxt_set_mup();
1830 gen_op_iwmmxt_set_cup();
1831 break;
d00584b7 1832 case 0x011: /* TMCR */
18c9b560
AZ
1833 if (insn & 0xf)
1834 return 1;
1835 rd = (insn >> 12) & 0xf;
1836 wrd = (insn >> 16) & 0xf;
1837 switch (wrd) {
1838 case ARM_IWMMXT_wCID:
1839 case ARM_IWMMXT_wCASF:
1840 break;
1841 case ARM_IWMMXT_wCon:
1842 gen_op_iwmmxt_set_cup();
1843 /* Fall through. */
1844 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1845 tmp = iwmmxt_load_creg(wrd);
1846 tmp2 = load_reg(s, rd);
f669df27 1847 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1848 tcg_temp_free_i32(tmp2);
da6b5335 1849 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1850 break;
1851 case ARM_IWMMXT_wCGR0:
1852 case ARM_IWMMXT_wCGR1:
1853 case ARM_IWMMXT_wCGR2:
1854 case ARM_IWMMXT_wCGR3:
1855 gen_op_iwmmxt_set_cup();
da6b5335
FN
1856 tmp = load_reg(s, rd);
1857 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1858 break;
1859 default:
1860 return 1;
1861 }
1862 break;
d00584b7 1863 case 0x100: /* WXOR */
18c9b560
AZ
1864 wrd = (insn >> 12) & 0xf;
1865 rd0 = (insn >> 0) & 0xf;
1866 rd1 = (insn >> 16) & 0xf;
1867 gen_op_iwmmxt_movq_M0_wRn(rd0);
1868 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1869 gen_op_iwmmxt_setpsr_nz();
1870 gen_op_iwmmxt_movq_wRn_M0(wrd);
1871 gen_op_iwmmxt_set_mup();
1872 gen_op_iwmmxt_set_cup();
1873 break;
d00584b7 1874 case 0x111: /* TMRC */
18c9b560
AZ
1875 if (insn & 0xf)
1876 return 1;
1877 rd = (insn >> 12) & 0xf;
1878 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1879 tmp = iwmmxt_load_creg(wrd);
1880 store_reg(s, rd, tmp);
18c9b560 1881 break;
d00584b7 1882 case 0x300: /* WANDN */
18c9b560
AZ
1883 wrd = (insn >> 12) & 0xf;
1884 rd0 = (insn >> 0) & 0xf;
1885 rd1 = (insn >> 16) & 0xf;
1886 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1887 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1888 gen_op_iwmmxt_andq_M0_wRn(rd1);
1889 gen_op_iwmmxt_setpsr_nz();
1890 gen_op_iwmmxt_movq_wRn_M0(wrd);
1891 gen_op_iwmmxt_set_mup();
1892 gen_op_iwmmxt_set_cup();
1893 break;
d00584b7 1894 case 0x200: /* WAND */
18c9b560
AZ
1895 wrd = (insn >> 12) & 0xf;
1896 rd0 = (insn >> 0) & 0xf;
1897 rd1 = (insn >> 16) & 0xf;
1898 gen_op_iwmmxt_movq_M0_wRn(rd0);
1899 gen_op_iwmmxt_andq_M0_wRn(rd1);
1900 gen_op_iwmmxt_setpsr_nz();
1901 gen_op_iwmmxt_movq_wRn_M0(wrd);
1902 gen_op_iwmmxt_set_mup();
1903 gen_op_iwmmxt_set_cup();
1904 break;
d00584b7 1905 case 0x810: case 0xa10: /* WMADD */
18c9b560
AZ
1906 wrd = (insn >> 12) & 0xf;
1907 rd0 = (insn >> 0) & 0xf;
1908 rd1 = (insn >> 16) & 0xf;
1909 gen_op_iwmmxt_movq_M0_wRn(rd0);
1910 if (insn & (1 << 21))
1911 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1912 else
1913 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1914 gen_op_iwmmxt_movq_wRn_M0(wrd);
1915 gen_op_iwmmxt_set_mup();
1916 break;
d00584b7 1917 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
18c9b560
AZ
1918 wrd = (insn >> 12) & 0xf;
1919 rd0 = (insn >> 16) & 0xf;
1920 rd1 = (insn >> 0) & 0xf;
1921 gen_op_iwmmxt_movq_M0_wRn(rd0);
1922 switch ((insn >> 22) & 3) {
1923 case 0:
1924 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1925 break;
1926 case 1:
1927 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1928 break;
1929 case 2:
1930 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1931 break;
1932 case 3:
1933 return 1;
1934 }
1935 gen_op_iwmmxt_movq_wRn_M0(wrd);
1936 gen_op_iwmmxt_set_mup();
1937 gen_op_iwmmxt_set_cup();
1938 break;
d00584b7 1939 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
18c9b560
AZ
1940 wrd = (insn >> 12) & 0xf;
1941 rd0 = (insn >> 16) & 0xf;
1942 rd1 = (insn >> 0) & 0xf;
1943 gen_op_iwmmxt_movq_M0_wRn(rd0);
1944 switch ((insn >> 22) & 3) {
1945 case 0:
1946 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1947 break;
1948 case 1:
1949 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1950 break;
1951 case 2:
1952 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1953 break;
1954 case 3:
1955 return 1;
1956 }
1957 gen_op_iwmmxt_movq_wRn_M0(wrd);
1958 gen_op_iwmmxt_set_mup();
1959 gen_op_iwmmxt_set_cup();
1960 break;
d00584b7 1961 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
18c9b560
AZ
1962 wrd = (insn >> 12) & 0xf;
1963 rd0 = (insn >> 16) & 0xf;
1964 rd1 = (insn >> 0) & 0xf;
1965 gen_op_iwmmxt_movq_M0_wRn(rd0);
1966 if (insn & (1 << 22))
1967 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1968 else
1969 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1970 if (!(insn & (1 << 20)))
1971 gen_op_iwmmxt_addl_M0_wRn(wrd);
1972 gen_op_iwmmxt_movq_wRn_M0(wrd);
1973 gen_op_iwmmxt_set_mup();
1974 break;
d00584b7 1975 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
18c9b560
AZ
1976 wrd = (insn >> 12) & 0xf;
1977 rd0 = (insn >> 16) & 0xf;
1978 rd1 = (insn >> 0) & 0xf;
1979 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1980 if (insn & (1 << 21)) {
1981 if (insn & (1 << 20))
1982 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1983 else
1984 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1985 } else {
1986 if (insn & (1 << 20))
1987 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1988 else
1989 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1990 }
18c9b560
AZ
1991 gen_op_iwmmxt_movq_wRn_M0(wrd);
1992 gen_op_iwmmxt_set_mup();
1993 break;
d00584b7 1994 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
18c9b560
AZ
1995 wrd = (insn >> 12) & 0xf;
1996 rd0 = (insn >> 16) & 0xf;
1997 rd1 = (insn >> 0) & 0xf;
1998 gen_op_iwmmxt_movq_M0_wRn(rd0);
1999 if (insn & (1 << 21))
2000 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2001 else
2002 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2003 if (!(insn & (1 << 20))) {
e677137d
PB
2004 iwmmxt_load_reg(cpu_V1, wrd);
2005 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2006 }
2007 gen_op_iwmmxt_movq_wRn_M0(wrd);
2008 gen_op_iwmmxt_set_mup();
2009 break;
d00584b7 2010 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
18c9b560
AZ
2011 wrd = (insn >> 12) & 0xf;
2012 rd0 = (insn >> 16) & 0xf;
2013 rd1 = (insn >> 0) & 0xf;
2014 gen_op_iwmmxt_movq_M0_wRn(rd0);
2015 switch ((insn >> 22) & 3) {
2016 case 0:
2017 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2018 break;
2019 case 1:
2020 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2021 break;
2022 case 2:
2023 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2024 break;
2025 case 3:
2026 return 1;
2027 }
2028 gen_op_iwmmxt_movq_wRn_M0(wrd);
2029 gen_op_iwmmxt_set_mup();
2030 gen_op_iwmmxt_set_cup();
2031 break;
d00584b7 2032 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
18c9b560
AZ
2033 wrd = (insn >> 12) & 0xf;
2034 rd0 = (insn >> 16) & 0xf;
2035 rd1 = (insn >> 0) & 0xf;
2036 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2037 if (insn & (1 << 22)) {
2038 if (insn & (1 << 20))
2039 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2040 else
2041 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2042 } else {
2043 if (insn & (1 << 20))
2044 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2045 else
2046 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2047 }
18c9b560
AZ
2048 gen_op_iwmmxt_movq_wRn_M0(wrd);
2049 gen_op_iwmmxt_set_mup();
2050 gen_op_iwmmxt_set_cup();
2051 break;
d00584b7 2052 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
18c9b560
AZ
2053 wrd = (insn >> 12) & 0xf;
2054 rd0 = (insn >> 16) & 0xf;
2055 rd1 = (insn >> 0) & 0xf;
2056 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2057 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2058 tcg_gen_andi_i32(tmp, tmp, 7);
2059 iwmmxt_load_reg(cpu_V1, rd1);
2060 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2061 tcg_temp_free_i32(tmp);
18c9b560
AZ
2062 gen_op_iwmmxt_movq_wRn_M0(wrd);
2063 gen_op_iwmmxt_set_mup();
2064 break;
d00584b7 2065 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2066 if (((insn >> 6) & 3) == 3)
2067 return 1;
18c9b560
AZ
2068 rd = (insn >> 12) & 0xf;
2069 wrd = (insn >> 16) & 0xf;
da6b5335 2070 tmp = load_reg(s, rd);
18c9b560
AZ
2071 gen_op_iwmmxt_movq_M0_wRn(wrd);
2072 switch ((insn >> 6) & 3) {
2073 case 0:
da6b5335
FN
2074 tmp2 = tcg_const_i32(0xff);
2075 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2076 break;
2077 case 1:
da6b5335
FN
2078 tmp2 = tcg_const_i32(0xffff);
2079 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2080 break;
2081 case 2:
da6b5335
FN
2082 tmp2 = tcg_const_i32(0xffffffff);
2083 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2084 break;
da6b5335 2085 default:
f764718d
RH
2086 tmp2 = NULL;
2087 tmp3 = NULL;
18c9b560 2088 }
da6b5335 2089 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2090 tcg_temp_free_i32(tmp3);
2091 tcg_temp_free_i32(tmp2);
7d1b0095 2092 tcg_temp_free_i32(tmp);
18c9b560
AZ
2093 gen_op_iwmmxt_movq_wRn_M0(wrd);
2094 gen_op_iwmmxt_set_mup();
2095 break;
d00584b7 2096 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
18c9b560
AZ
2097 rd = (insn >> 12) & 0xf;
2098 wrd = (insn >> 16) & 0xf;
da6b5335 2099 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2100 return 1;
2101 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2102 tmp = tcg_temp_new_i32();
18c9b560
AZ
2103 switch ((insn >> 22) & 3) {
2104 case 0:
da6b5335 2105 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2106 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2107 if (insn & 8) {
2108 tcg_gen_ext8s_i32(tmp, tmp);
2109 } else {
2110 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2111 }
2112 break;
2113 case 1:
da6b5335 2114 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2115 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2116 if (insn & 8) {
2117 tcg_gen_ext16s_i32(tmp, tmp);
2118 } else {
2119 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2120 }
2121 break;
2122 case 2:
da6b5335 2123 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2124 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2125 break;
18c9b560 2126 }
da6b5335 2127 store_reg(s, rd, tmp);
18c9b560 2128 break;
d00584b7 2129 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2130 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2131 return 1;
da6b5335 2132 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2133 switch ((insn >> 22) & 3) {
2134 case 0:
da6b5335 2135 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2136 break;
2137 case 1:
da6b5335 2138 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2139 break;
2140 case 2:
da6b5335 2141 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2142 break;
18c9b560 2143 }
da6b5335
FN
2144 tcg_gen_shli_i32(tmp, tmp, 28);
2145 gen_set_nzcv(tmp);
7d1b0095 2146 tcg_temp_free_i32(tmp);
18c9b560 2147 break;
d00584b7 2148 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2149 if (((insn >> 6) & 3) == 3)
2150 return 1;
18c9b560
AZ
2151 rd = (insn >> 12) & 0xf;
2152 wrd = (insn >> 16) & 0xf;
da6b5335 2153 tmp = load_reg(s, rd);
18c9b560
AZ
2154 switch ((insn >> 6) & 3) {
2155 case 0:
da6b5335 2156 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2157 break;
2158 case 1:
da6b5335 2159 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2160 break;
2161 case 2:
da6b5335 2162 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2163 break;
18c9b560 2164 }
7d1b0095 2165 tcg_temp_free_i32(tmp);
18c9b560
AZ
2166 gen_op_iwmmxt_movq_wRn_M0(wrd);
2167 gen_op_iwmmxt_set_mup();
2168 break;
d00584b7 2169 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2170 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2171 return 1;
da6b5335 2172 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2173 tmp2 = tcg_temp_new_i32();
da6b5335 2174 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2175 switch ((insn >> 22) & 3) {
2176 case 0:
2177 for (i = 0; i < 7; i ++) {
da6b5335
FN
2178 tcg_gen_shli_i32(tmp2, tmp2, 4);
2179 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2180 }
2181 break;
2182 case 1:
2183 for (i = 0; i < 3; i ++) {
da6b5335
FN
2184 tcg_gen_shli_i32(tmp2, tmp2, 8);
2185 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2186 }
2187 break;
2188 case 2:
da6b5335
FN
2189 tcg_gen_shli_i32(tmp2, tmp2, 16);
2190 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2191 break;
18c9b560 2192 }
da6b5335 2193 gen_set_nzcv(tmp);
7d1b0095
PM
2194 tcg_temp_free_i32(tmp2);
2195 tcg_temp_free_i32(tmp);
18c9b560 2196 break;
d00584b7 2197 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
18c9b560
AZ
2198 wrd = (insn >> 12) & 0xf;
2199 rd0 = (insn >> 16) & 0xf;
2200 gen_op_iwmmxt_movq_M0_wRn(rd0);
2201 switch ((insn >> 22) & 3) {
2202 case 0:
e677137d 2203 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2204 break;
2205 case 1:
e677137d 2206 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2207 break;
2208 case 2:
e677137d 2209 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2210 break;
2211 case 3:
2212 return 1;
2213 }
2214 gen_op_iwmmxt_movq_wRn_M0(wrd);
2215 gen_op_iwmmxt_set_mup();
2216 break;
d00584b7 2217 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2218 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2219 return 1;
da6b5335 2220 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2221 tmp2 = tcg_temp_new_i32();
da6b5335 2222 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2223 switch ((insn >> 22) & 3) {
2224 case 0:
2225 for (i = 0; i < 7; i ++) {
da6b5335
FN
2226 tcg_gen_shli_i32(tmp2, tmp2, 4);
2227 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2228 }
2229 break;
2230 case 1:
2231 for (i = 0; i < 3; i ++) {
da6b5335
FN
2232 tcg_gen_shli_i32(tmp2, tmp2, 8);
2233 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2234 }
2235 break;
2236 case 2:
da6b5335
FN
2237 tcg_gen_shli_i32(tmp2, tmp2, 16);
2238 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2239 break;
18c9b560 2240 }
da6b5335 2241 gen_set_nzcv(tmp);
7d1b0095
PM
2242 tcg_temp_free_i32(tmp2);
2243 tcg_temp_free_i32(tmp);
18c9b560 2244 break;
d00584b7 2245 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
18c9b560
AZ
2246 rd = (insn >> 12) & 0xf;
2247 rd0 = (insn >> 16) & 0xf;
da6b5335 2248 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2249 return 1;
2250 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2251 tmp = tcg_temp_new_i32();
18c9b560
AZ
2252 switch ((insn >> 22) & 3) {
2253 case 0:
da6b5335 2254 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2255 break;
2256 case 1:
da6b5335 2257 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2258 break;
2259 case 2:
da6b5335 2260 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2261 break;
18c9b560 2262 }
da6b5335 2263 store_reg(s, rd, tmp);
18c9b560 2264 break;
d00584b7 2265 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
18c9b560
AZ
2266 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2267 wrd = (insn >> 12) & 0xf;
2268 rd0 = (insn >> 16) & 0xf;
2269 rd1 = (insn >> 0) & 0xf;
2270 gen_op_iwmmxt_movq_M0_wRn(rd0);
2271 switch ((insn >> 22) & 3) {
2272 case 0:
2273 if (insn & (1 << 21))
2274 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2275 else
2276 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2277 break;
2278 case 1:
2279 if (insn & (1 << 21))
2280 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2281 else
2282 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2283 break;
2284 case 2:
2285 if (insn & (1 << 21))
2286 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2287 else
2288 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2289 break;
2290 case 3:
2291 return 1;
2292 }
2293 gen_op_iwmmxt_movq_wRn_M0(wrd);
2294 gen_op_iwmmxt_set_mup();
2295 gen_op_iwmmxt_set_cup();
2296 break;
d00584b7 2297 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
18c9b560
AZ
2298 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2299 wrd = (insn >> 12) & 0xf;
2300 rd0 = (insn >> 16) & 0xf;
2301 gen_op_iwmmxt_movq_M0_wRn(rd0);
2302 switch ((insn >> 22) & 3) {
2303 case 0:
2304 if (insn & (1 << 21))
2305 gen_op_iwmmxt_unpacklsb_M0();
2306 else
2307 gen_op_iwmmxt_unpacklub_M0();
2308 break;
2309 case 1:
2310 if (insn & (1 << 21))
2311 gen_op_iwmmxt_unpacklsw_M0();
2312 else
2313 gen_op_iwmmxt_unpackluw_M0();
2314 break;
2315 case 2:
2316 if (insn & (1 << 21))
2317 gen_op_iwmmxt_unpacklsl_M0();
2318 else
2319 gen_op_iwmmxt_unpacklul_M0();
2320 break;
2321 case 3:
2322 return 1;
2323 }
2324 gen_op_iwmmxt_movq_wRn_M0(wrd);
2325 gen_op_iwmmxt_set_mup();
2326 gen_op_iwmmxt_set_cup();
2327 break;
d00584b7 2328 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
18c9b560
AZ
2329 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2330 wrd = (insn >> 12) & 0xf;
2331 rd0 = (insn >> 16) & 0xf;
2332 gen_op_iwmmxt_movq_M0_wRn(rd0);
2333 switch ((insn >> 22) & 3) {
2334 case 0:
2335 if (insn & (1 << 21))
2336 gen_op_iwmmxt_unpackhsb_M0();
2337 else
2338 gen_op_iwmmxt_unpackhub_M0();
2339 break;
2340 case 1:
2341 if (insn & (1 << 21))
2342 gen_op_iwmmxt_unpackhsw_M0();
2343 else
2344 gen_op_iwmmxt_unpackhuw_M0();
2345 break;
2346 case 2:
2347 if (insn & (1 << 21))
2348 gen_op_iwmmxt_unpackhsl_M0();
2349 else
2350 gen_op_iwmmxt_unpackhul_M0();
2351 break;
2352 case 3:
2353 return 1;
2354 }
2355 gen_op_iwmmxt_movq_wRn_M0(wrd);
2356 gen_op_iwmmxt_set_mup();
2357 gen_op_iwmmxt_set_cup();
2358 break;
d00584b7 2359 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
18c9b560 2360 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2361 if (((insn >> 22) & 3) == 0)
2362 return 1;
18c9b560
AZ
2363 wrd = (insn >> 12) & 0xf;
2364 rd0 = (insn >> 16) & 0xf;
2365 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2366 tmp = tcg_temp_new_i32();
da6b5335 2367 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2368 tcg_temp_free_i32(tmp);
18c9b560 2369 return 1;
da6b5335 2370 }
18c9b560 2371 switch ((insn >> 22) & 3) {
18c9b560 2372 case 1:
477955bd 2373 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2374 break;
2375 case 2:
477955bd 2376 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2377 break;
2378 case 3:
477955bd 2379 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2380 break;
2381 }
7d1b0095 2382 tcg_temp_free_i32(tmp);
18c9b560
AZ
2383 gen_op_iwmmxt_movq_wRn_M0(wrd);
2384 gen_op_iwmmxt_set_mup();
2385 gen_op_iwmmxt_set_cup();
2386 break;
d00584b7 2387 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
18c9b560 2388 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2389 if (((insn >> 22) & 3) == 0)
2390 return 1;
18c9b560
AZ
2391 wrd = (insn >> 12) & 0xf;
2392 rd0 = (insn >> 16) & 0xf;
2393 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2394 tmp = tcg_temp_new_i32();
da6b5335 2395 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2396 tcg_temp_free_i32(tmp);
18c9b560 2397 return 1;
da6b5335 2398 }
18c9b560 2399 switch ((insn >> 22) & 3) {
18c9b560 2400 case 1:
477955bd 2401 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2402 break;
2403 case 2:
477955bd 2404 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2405 break;
2406 case 3:
477955bd 2407 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2408 break;
2409 }
7d1b0095 2410 tcg_temp_free_i32(tmp);
18c9b560
AZ
2411 gen_op_iwmmxt_movq_wRn_M0(wrd);
2412 gen_op_iwmmxt_set_mup();
2413 gen_op_iwmmxt_set_cup();
2414 break;
d00584b7 2415 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
18c9b560 2416 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2417 if (((insn >> 22) & 3) == 0)
2418 return 1;
18c9b560
AZ
2419 wrd = (insn >> 12) & 0xf;
2420 rd0 = (insn >> 16) & 0xf;
2421 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2422 tmp = tcg_temp_new_i32();
da6b5335 2423 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2424 tcg_temp_free_i32(tmp);
18c9b560 2425 return 1;
da6b5335 2426 }
18c9b560 2427 switch ((insn >> 22) & 3) {
18c9b560 2428 case 1:
477955bd 2429 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2430 break;
2431 case 2:
477955bd 2432 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2433 break;
2434 case 3:
477955bd 2435 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2436 break;
2437 }
7d1b0095 2438 tcg_temp_free_i32(tmp);
18c9b560
AZ
2439 gen_op_iwmmxt_movq_wRn_M0(wrd);
2440 gen_op_iwmmxt_set_mup();
2441 gen_op_iwmmxt_set_cup();
2442 break;
d00584b7 2443 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
18c9b560 2444 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2445 if (((insn >> 22) & 3) == 0)
2446 return 1;
18c9b560
AZ
2447 wrd = (insn >> 12) & 0xf;
2448 rd0 = (insn >> 16) & 0xf;
2449 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2450 tmp = tcg_temp_new_i32();
18c9b560 2451 switch ((insn >> 22) & 3) {
18c9b560 2452 case 1:
da6b5335 2453 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2454 tcg_temp_free_i32(tmp);
18c9b560 2455 return 1;
da6b5335 2456 }
477955bd 2457 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2458 break;
2459 case 2:
da6b5335 2460 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2461 tcg_temp_free_i32(tmp);
18c9b560 2462 return 1;
da6b5335 2463 }
477955bd 2464 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2465 break;
2466 case 3:
da6b5335 2467 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2468 tcg_temp_free_i32(tmp);
18c9b560 2469 return 1;
da6b5335 2470 }
477955bd 2471 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2472 break;
2473 }
7d1b0095 2474 tcg_temp_free_i32(tmp);
18c9b560
AZ
2475 gen_op_iwmmxt_movq_wRn_M0(wrd);
2476 gen_op_iwmmxt_set_mup();
2477 gen_op_iwmmxt_set_cup();
2478 break;
d00584b7 2479 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
18c9b560
AZ
2480 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2481 wrd = (insn >> 12) & 0xf;
2482 rd0 = (insn >> 16) & 0xf;
2483 rd1 = (insn >> 0) & 0xf;
2484 gen_op_iwmmxt_movq_M0_wRn(rd0);
2485 switch ((insn >> 22) & 3) {
2486 case 0:
2487 if (insn & (1 << 21))
2488 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2489 else
2490 gen_op_iwmmxt_minub_M0_wRn(rd1);
2491 break;
2492 case 1:
2493 if (insn & (1 << 21))
2494 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2495 else
2496 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2497 break;
2498 case 2:
2499 if (insn & (1 << 21))
2500 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2501 else
2502 gen_op_iwmmxt_minul_M0_wRn(rd1);
2503 break;
2504 case 3:
2505 return 1;
2506 }
2507 gen_op_iwmmxt_movq_wRn_M0(wrd);
2508 gen_op_iwmmxt_set_mup();
2509 break;
d00584b7 2510 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
18c9b560
AZ
2511 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2512 wrd = (insn >> 12) & 0xf;
2513 rd0 = (insn >> 16) & 0xf;
2514 rd1 = (insn >> 0) & 0xf;
2515 gen_op_iwmmxt_movq_M0_wRn(rd0);
2516 switch ((insn >> 22) & 3) {
2517 case 0:
2518 if (insn & (1 << 21))
2519 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2520 else
2521 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2522 break;
2523 case 1:
2524 if (insn & (1 << 21))
2525 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2526 else
2527 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2528 break;
2529 case 2:
2530 if (insn & (1 << 21))
2531 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2532 else
2533 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2534 break;
2535 case 3:
2536 return 1;
2537 }
2538 gen_op_iwmmxt_movq_wRn_M0(wrd);
2539 gen_op_iwmmxt_set_mup();
2540 break;
d00584b7 2541 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
18c9b560
AZ
2542 case 0x402: case 0x502: case 0x602: case 0x702:
2543 wrd = (insn >> 12) & 0xf;
2544 rd0 = (insn >> 16) & 0xf;
2545 rd1 = (insn >> 0) & 0xf;
2546 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2547 tmp = tcg_const_i32((insn >> 20) & 3);
2548 iwmmxt_load_reg(cpu_V1, rd1);
2549 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2550 tcg_temp_free_i32(tmp);
18c9b560
AZ
2551 gen_op_iwmmxt_movq_wRn_M0(wrd);
2552 gen_op_iwmmxt_set_mup();
2553 break;
d00584b7 2554 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
18c9b560
AZ
2555 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2556 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2557 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2558 wrd = (insn >> 12) & 0xf;
2559 rd0 = (insn >> 16) & 0xf;
2560 rd1 = (insn >> 0) & 0xf;
2561 gen_op_iwmmxt_movq_M0_wRn(rd0);
2562 switch ((insn >> 20) & 0xf) {
2563 case 0x0:
2564 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2565 break;
2566 case 0x1:
2567 gen_op_iwmmxt_subub_M0_wRn(rd1);
2568 break;
2569 case 0x3:
2570 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2571 break;
2572 case 0x4:
2573 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2574 break;
2575 case 0x5:
2576 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2577 break;
2578 case 0x7:
2579 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2580 break;
2581 case 0x8:
2582 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2583 break;
2584 case 0x9:
2585 gen_op_iwmmxt_subul_M0_wRn(rd1);
2586 break;
2587 case 0xb:
2588 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2589 break;
2590 default:
2591 return 1;
2592 }
2593 gen_op_iwmmxt_movq_wRn_M0(wrd);
2594 gen_op_iwmmxt_set_mup();
2595 gen_op_iwmmxt_set_cup();
2596 break;
d00584b7 2597 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
18c9b560
AZ
2598 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2599 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2600 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2601 wrd = (insn >> 12) & 0xf;
2602 rd0 = (insn >> 16) & 0xf;
2603 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2604 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2605 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2606 tcg_temp_free_i32(tmp);
18c9b560
AZ
2607 gen_op_iwmmxt_movq_wRn_M0(wrd);
2608 gen_op_iwmmxt_set_mup();
2609 gen_op_iwmmxt_set_cup();
2610 break;
d00584b7 2611 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
18c9b560
AZ
2612 case 0x418: case 0x518: case 0x618: case 0x718:
2613 case 0x818: case 0x918: case 0xa18: case 0xb18:
2614 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2615 wrd = (insn >> 12) & 0xf;
2616 rd0 = (insn >> 16) & 0xf;
2617 rd1 = (insn >> 0) & 0xf;
2618 gen_op_iwmmxt_movq_M0_wRn(rd0);
2619 switch ((insn >> 20) & 0xf) {
2620 case 0x0:
2621 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2622 break;
2623 case 0x1:
2624 gen_op_iwmmxt_addub_M0_wRn(rd1);
2625 break;
2626 case 0x3:
2627 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2628 break;
2629 case 0x4:
2630 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2631 break;
2632 case 0x5:
2633 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2634 break;
2635 case 0x7:
2636 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2637 break;
2638 case 0x8:
2639 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2640 break;
2641 case 0x9:
2642 gen_op_iwmmxt_addul_M0_wRn(rd1);
2643 break;
2644 case 0xb:
2645 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2646 break;
2647 default:
2648 return 1;
2649 }
2650 gen_op_iwmmxt_movq_wRn_M0(wrd);
2651 gen_op_iwmmxt_set_mup();
2652 gen_op_iwmmxt_set_cup();
2653 break;
d00584b7 2654 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
18c9b560
AZ
2655 case 0x408: case 0x508: case 0x608: case 0x708:
2656 case 0x808: case 0x908: case 0xa08: case 0xb08:
2657 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2658 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2659 return 1;
18c9b560
AZ
2660 wrd = (insn >> 12) & 0xf;
2661 rd0 = (insn >> 16) & 0xf;
2662 rd1 = (insn >> 0) & 0xf;
2663 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2664 switch ((insn >> 22) & 3) {
18c9b560
AZ
2665 case 1:
2666 if (insn & (1 << 21))
2667 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2668 else
2669 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2670 break;
2671 case 2:
2672 if (insn & (1 << 21))
2673 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2674 else
2675 gen_op_iwmmxt_packul_M0_wRn(rd1);
2676 break;
2677 case 3:
2678 if (insn & (1 << 21))
2679 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2680 else
2681 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2682 break;
2683 }
2684 gen_op_iwmmxt_movq_wRn_M0(wrd);
2685 gen_op_iwmmxt_set_mup();
2686 gen_op_iwmmxt_set_cup();
2687 break;
2688 case 0x201: case 0x203: case 0x205: case 0x207:
2689 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2690 case 0x211: case 0x213: case 0x215: case 0x217:
2691 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2692 wrd = (insn >> 5) & 0xf;
2693 rd0 = (insn >> 12) & 0xf;
2694 rd1 = (insn >> 0) & 0xf;
2695 if (rd0 == 0xf || rd1 == 0xf)
2696 return 1;
2697 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2698 tmp = load_reg(s, rd0);
2699 tmp2 = load_reg(s, rd1);
18c9b560 2700 switch ((insn >> 16) & 0xf) {
d00584b7 2701 case 0x0: /* TMIA */
da6b5335 2702 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2703 break;
d00584b7 2704 case 0x8: /* TMIAPH */
da6b5335 2705 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2706 break;
d00584b7 2707 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2708 if (insn & (1 << 16))
da6b5335 2709 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2710 if (insn & (1 << 17))
da6b5335
FN
2711 tcg_gen_shri_i32(tmp2, tmp2, 16);
2712 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2713 break;
2714 default:
7d1b0095
PM
2715 tcg_temp_free_i32(tmp2);
2716 tcg_temp_free_i32(tmp);
18c9b560
AZ
2717 return 1;
2718 }
7d1b0095
PM
2719 tcg_temp_free_i32(tmp2);
2720 tcg_temp_free_i32(tmp);
18c9b560
AZ
2721 gen_op_iwmmxt_movq_wRn_M0(wrd);
2722 gen_op_iwmmxt_set_mup();
2723 break;
2724 default:
2725 return 1;
2726 }
2727
2728 return 0;
2729}
2730
a1c7273b 2731/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2732 (ie. an undefined instruction). */
7dcc1f89 2733static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2734{
2735 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2736 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2737
2738 if ((insn & 0x0ff00f10) == 0x0e200010) {
2739 /* Multiply with Internal Accumulate Format */
2740 rd0 = (insn >> 12) & 0xf;
2741 rd1 = insn & 0xf;
2742 acc = (insn >> 5) & 7;
2743
2744 if (acc != 0)
2745 return 1;
2746
3a554c0f
FN
2747 tmp = load_reg(s, rd0);
2748 tmp2 = load_reg(s, rd1);
18c9b560 2749 switch ((insn >> 16) & 0xf) {
d00584b7 2750 case 0x0: /* MIA */
3a554c0f 2751 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2752 break;
d00584b7 2753 case 0x8: /* MIAPH */
3a554c0f 2754 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2755 break;
d00584b7
PM
2756 case 0xc: /* MIABB */
2757 case 0xd: /* MIABT */
2758 case 0xe: /* MIATB */
2759 case 0xf: /* MIATT */
18c9b560 2760 if (insn & (1 << 16))
3a554c0f 2761 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2762 if (insn & (1 << 17))
3a554c0f
FN
2763 tcg_gen_shri_i32(tmp2, tmp2, 16);
2764 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2765 break;
2766 default:
2767 return 1;
2768 }
7d1b0095
PM
2769 tcg_temp_free_i32(tmp2);
2770 tcg_temp_free_i32(tmp);
18c9b560
AZ
2771
2772 gen_op_iwmmxt_movq_wRn_M0(acc);
2773 return 0;
2774 }
2775
2776 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2777 /* Internal Accumulator Access Format */
2778 rdhi = (insn >> 16) & 0xf;
2779 rdlo = (insn >> 12) & 0xf;
2780 acc = insn & 7;
2781
2782 if (acc != 0)
2783 return 1;
2784
d00584b7 2785 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2786 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2787 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
664b7e3b 2788 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2789 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
d00584b7 2790 } else { /* MAR */
3a554c0f
FN
2791 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2792 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2793 }
2794 return 0;
2795 }
2796
2797 return 1;
2798}
2799
/* Shift x right by n bits; a negative n shifts left by -n instead.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))

/* Extract a single-precision VFP register number: four bits at
 * "bigbit" plus a low bit at "smallbit".  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))

/* Extract a double-precision VFP register number.  Pre-VFP3 cores only
 * have 16 D registers, so a set "smallbit" (the high register bit) is
 * UNDEF there and makes the enclosing function return 1.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the D (dest), N and M (source) register operands.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2819
39d5492a 2820static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2821{
39d5492a 2822 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2823 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2824 tcg_gen_shli_i32(tmp, var, 16);
2825 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2826 tcg_temp_free_i32(tmp);
ad69471c
PB
2827}
2828
39d5492a 2829static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2830{
39d5492a 2831 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2832 tcg_gen_andi_i32(var, var, 0xffff0000);
2833 tcg_gen_shri_i32(tmp, var, 16);
2834 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2835 tcg_temp_free_i32(tmp);
ad69471c
PB
2836}
2837
06db8196
PM
2838/*
2839 * Disassemble a VFP instruction. Returns nonzero if an error occurred
2840 * (ie. an undefined instruction).
2841 */
7dcc1f89 2842static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95 2843{
d614a513 2844 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 2845 return 1;
d614a513 2846 }
40f137e1 2847
78e138bc
PM
2848 /*
2849 * If the decodetree decoder handles this insn it will always
2850 * emit code to either execute the insn or generate an appropriate
2851 * exception; so we don't need to ever return non-zero to tell
2852 * the calling code to emit an UNDEF exception.
2853 */
2854 if (extract32(insn, 28, 4) == 0xf) {
2855 if (disas_vfp_uncond(s, insn)) {
2856 return 0;
2857 }
2858 } else {
2859 if (disas_vfp(s, insn)) {
2860 return 0;
2861 }
2862 }
3111bfc2
PM
2863 /* If the decodetree decoder didn't handle this insn, it must be UNDEF */
2864 return 1;
b7bcbe95
FB
2865}
2866
90aa39a1 2867static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 2868{
90aa39a1 2869#ifndef CONFIG_USER_ONLY
dcba3a8d 2870 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
a0415916 2871 ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
90aa39a1
SF
2872#else
2873 return true;
2874#endif
2875}
6e256c93 2876
/* Jump to the next TB via a runtime lookup of the target PC.  */
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}
2881
4cae8f56
AB
2882/* This will end the TB but doesn't guarantee we'll return to
2883 * cpu_loop_exec. Any live exit_requests will be processed as we
2884 * enter the next TB.
2885 */
8a6b28c7 2886static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
90aa39a1
SF
2887{
2888 if (use_goto_tb(s, dest)) {
57fec1fe 2889 tcg_gen_goto_tb(n);
eaed129d 2890 gen_set_pc_im(s, dest);
07ea28b4 2891 tcg_gen_exit_tb(s->base.tb, n);
6e256c93 2892 } else {
eaed129d 2893 gen_set_pc_im(s, dest);
8a6b28c7 2894 gen_goto_ptr();
6e256c93 2895 }
dcba3a8d 2896 s->base.is_jmp = DISAS_NORETURN;
c53be334
FB
2897}
2898
8aaca4c0
FB
2899static inline void gen_jmp (DisasContext *s, uint32_t dest)
2900{
b636649f 2901 if (unlikely(is_singlestepping(s))) {
8aaca4c0 2902 /* An indirect jump so that we still trigger the debug exception. */
5899f386 2903 if (s->thumb)
d9ba4830
PB
2904 dest |= 1;
2905 gen_bx_im(s, dest);
8aaca4c0 2906 } else {
6e256c93 2907 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
2908 }
2909}
2910
39d5492a 2911static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 2912{
ee097184 2913 if (x)
d9ba4830 2914 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 2915 else
d9ba4830 2916 gen_sxth(t0);
ee097184 2917 if (y)
d9ba4830 2918 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 2919 else
d9ba4830
PB
2920 gen_sxth(t1);
2921 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
2922}
2923
/* Return the mask of PSR bits set by a MSR instruction.
 * flags bits [3:0] correspond to the instruction's f/s/x/c field
 * selectors (one byte of the PSR each); the result is then narrowed
 * to the bits actually writable on this CPU and at this privilege.
 */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits. */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality*/
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits. */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits. */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
2962
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write the SPSR: clear the masked bits, then OR in
         * the (masked) new value from t0.
         */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        /* CPSR writes go through the helper via gen_set_cpsr(). */
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR/SPSR changes can affect translation state (e.g. mode bits),
     * so end the TB and re-look-up.
     */
    gen_lookup_tb(s);
    return 0;
}
2984
2fbac54b
FN
2985/* Returns nonzero if access to the PSR is not permitted. */
2986static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
2987{
39d5492a 2988 TCGv_i32 tmp;
7d1b0095 2989 tmp = tcg_temp_new_i32();
2fbac54b
FN
2990 tcg_gen_movi_i32(tmp, val);
2991 return gen_set_psr(s, mask, spsr, tmp);
2992}
2993
8bfd0550
PM
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    /* Default exception target; may be overridden to EL3 below. */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /*
         * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
         * (and so we can forbid accesses from EL2 or below). elr_hyp
         * can be accessed also from Hyp mode, so forbid accesses from
         * EL0 or EL1.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
            (s->current_el < 3 && *regno != 17)) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                       syn_uncategorized(), exc_target);
    return false;
}
3143
/* Emit code for MSR (banked): write general register rn to the banked
 * register selected by (r, sysm).  Emits an UNDEF/trap instead if the
 * access is not permitted.
 */
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    /* The helper may have changed mode state: end the TB. */
    s->base.is_jmp = DISAS_UPDATE;
}
3165
/* Emit code for MRS (banked): read the banked register selected by
 * (r, sysm) into general register rn.  Emits an UNDEF/trap instead if
 * the access is not permitted.
 */
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}
3187
fb0e8e79
PM
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    /* Marks pc as dead: freed here after the copy into R[15]. */
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}
3197
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}
3b46e624 3214
fb0e8e79
PM
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    /* Same as RFE but the CPSR is restored from the current SPSR. */
    gen_rfe(s, pc, load_cpu_field(spsr));
}
3220
c22edfeb
AB
/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->base.pc_next);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        /* WFI always ends the TB so the halt state can take effect. */
        gen_set_pc_im(s, s->base.pc_next);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->base.pc_next);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
99c475ab 3260
/* Argument triple for 2-operand wide helpers: dest=cpu_V0, srcs=cpu_V0,cpu_V1 */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3262
39d5492a 3263static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3264{
3265 switch (size) {
dd8fbd78
FN
3266 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3267 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3268 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3269 default: abort();
9ee6e8bb 3270 }
9ee6e8bb
PB
3271}
3272
39d5492a 3273static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3274{
3275 switch (size) {
dd8fbd78
FN
3276 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3277 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3278 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3279 default: return;
3280 }
3281}
3282
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
/* So alias the 32-bit pairwise max/min helpers to the TCG min/max ops. */
#define gen_helper_neon_pmax_s32  tcg_gen_smax_i32
#define gen_helper_neon_pmax_u32  tcg_gen_umax_i32
#define gen_helper_neon_pmin_s32  tcg_gen_smin_i32
#define gen_helper_neon_pmin_u32  tcg_gen_umin_i32
ad69471c 3288
ad69471c
PB
3289#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3290 switch ((size << 1) | u) { \
3291 case 0: \
dd8fbd78 3292 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3293 break; \
3294 case 1: \
dd8fbd78 3295 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3296 break; \
3297 case 2: \
dd8fbd78 3298 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3299 break; \
3300 case 3: \
dd8fbd78 3301 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3302 break; \
3303 case 4: \
dd8fbd78 3304 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3305 break; \
3306 case 5: \
dd8fbd78 3307 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3308 break; \
3309 default: return 1; \
3310 }} while (0)
9ee6e8bb
PB
3311
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not need cpu_env. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3334
39d5492a 3335static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 3336{
39d5492a 3337 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
3338 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3339 return tmp;
9ee6e8bb
PB
3340}
3341
/* Spill var to Neon scratch slot SCRATCH in CPUARMState; consumes var. */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
3347
/* Load a Neon scalar operand, duplicated across a 32-bit temp.
 * For 16-bit scalars the register index encodes both the D-reg and
 * which halfword to broadcast; 32-bit scalars are used as-is.
 * Caller owns the returned temp.
 */
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
3363
/* Emit a VUZP (unzip) of registers rd/rm via the Neon helpers.
 * q selects quad vs double width; returns 1 for the invalid
 * non-quad size==2 encoding, 0 on success.
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qunzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qunzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_unzip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}
3403
/* Emit a VZIP (zip/interleave) of registers rd/rm via the Neon helpers.
 * Mirror image of gen_neon_unzip(); same encoding restrictions.
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_zip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}
3443
/* VTRN.8 on a 32-bit lane group: swap the odd bytes of t0 with the
 * even bytes of t1, in place.
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = (t0 even bytes shifted up) | (t1 even bytes) */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = (t1 odd bytes shifted down) | (t0 odd bytes) */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
3465
/* VTRN.16 on a 32-bit lane group: swap the high halfword of t0 with
 * the low halfword of t1, in place.
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
3484
3485
9ee6e8bb
PB
3486static struct {
3487 int nregs;
3488 int interleave;
3489 int spacing;
308e5636 3490} const neon_ls_element_type[11] = {
ac55d007
RH
3491 {1, 4, 1},
3492 {1, 4, 2},
9ee6e8bb 3493 {4, 1, 1},
ac55d007
RH
3494 {2, 2, 2},
3495 {1, 3, 1},
3496 {1, 3, 2},
9ee6e8bb
PB
3497 {3, 1, 1},
3498 {1, 1, 1},
ac55d007
RH
3499 {1, 2, 1},
3500 {1, 2, 2},
9ee6e8bb
PB
3501 {2, 1, 1}
3502};
3503
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int load;
    int n;
    int vec_size;
    int mmu_idx;
    MemOp endian;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    endian = s->be_data;
    mmu_idx = get_mem_index(s);
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1) {
            return 1;
        }
        /* For our purposes, bytes are always little-endian.  */
        if (size == 0) {
            endian = MO_LE;
        }
        /* Consecutive little-endian elements from a single register
         * can be promoted to a larger little-endian operation.
         */
        if (interleave == 1 && endian == MO_LE) {
            size = 3;
        }
        tmp64 = tcg_temp_new_i64();
        addr = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(1 << size);
        load_reg_var(s, addr, rn);
        for (reg = 0; reg < nregs; reg++) {
            for (n = 0; n < 8 >> size; n++) {
                int xs;
                for (xs = 0; xs < interleave; xs++) {
                    int tt = rd + reg + spacing * xs;

                    if (load) {
                        gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
                        neon_store_element64(tt, n, size, tmp64);
                    } else {
                        neon_load_element64(tmp64, tt, n, size);
                        gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
                    }
                    tcg_gen_add_i32(addr, addr, tmp2);
                }
            }
        }
        tcg_temp_free_i32(addr);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i64(tmp64);
        stride = nregs * interleave * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);

            /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
             * VLD2/3/4 to all lanes: bit 5 indicates register stride.
             */
            stride = (insn & (1 << 5)) ? 2 : 1;
            vec_size = nregs == 1 ? stride * 8 : 8;

            tmp = tcg_temp_new_i32();
            for (reg = 0; reg < nregs; reg++) {
                gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                                s->be_data | size);
                if ((rd & 1) && vec_size == 16) {
                    /* We cannot write 16 bytes at once because the
                     * destination is unaligned.
                     */
                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
                                         8, 8, tmp);
                    tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
                                     neon_reg_offset(rd, 0), 8, 8);
                } else {
                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
                                         vec_size, vec_size, tmp);
                }
                tcg_gen_addi_i32(addr, addr, 1 << size);
                rd += stride;
            }
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            int reg_idx;
            /* Decode the lane index and register stride from the
             * size-dependent index_align field.
             */
            switch (size) {
            case 0:
                reg_idx = (insn >> 5) & 7;
                stride = 1;
                break;
            case 1:
                reg_idx = (insn >> 6) & 3;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                reg_idx = (insn >> 7) & 1;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            tmp = tcg_temp_new_i32();
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                    neon_store_element(rd, reg_idx, size, tmp);
                } else { /* Store */
                    neon_load_element(tmp, rd, reg_idx, size);
                    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            tcg_temp_free_i32(tmp);
            stride = nregs * (1 << size);
        }
    }
    /* Post-indexed addressing: rm == 15 means no writeback, rm == 13
     * means writeback by the transfer size, otherwise rm is an index
     * register added to rn.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 3752
39d5492a 3753static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
3754{
3755 switch (size) {
3756 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3757 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 3758 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
3759 default: abort();
3760 }
3761}
3762
/* Signed saturating narrow (64->32); helpers take cpu_env so they can
 * update the saturation flag.
 */
static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}
3772
/* Unsigned saturating narrow (64->32); helpers take cpu_env so they can
 * update the saturation flag.
 */
static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}
3782
/* Signed-to-unsigned saturating narrow (VQMOVUN style, 64->32); helpers
 * take cpu_env so they can update the saturation flag.
 */
static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
3792
/* Variable shift used on the wide operand of a shift-and-narrow insn.
 * q selects rounding vs truncating shifts, u unsigned vs signed; only
 * 16- and 32-bit element sizes occur here.
 */
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
3826
/* Widen a 32-bit lane group into a 64-bit vector value, zero- or
 * sign-extending each element depending on u.  Consumes src.
 */
static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
3846
/* Long (widened) vector add on cpu_V0/cpu_V1, result in cpu_V0. */
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}
3856
/* Long (widened) vector subtract on cpu_V0/cpu_V1, result in cpu_V0. */
static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}
3866
/* Per-element negate of a long (widened) vector value, in place. */
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}
3878
/* Saturating long add: op0 += op1 with saturation; helpers take cpu_env
 * to update the saturation flag.  Only 32- and 64-bit element sizes.
 */
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
3887
39d5492a
PM
3888static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
3889 int size, int u)
ad69471c 3890{
a7812ae4 3891 TCGv_i64 tmp;
ad69471c
PB
3892
3893 switch ((size << 1) | u) {
3894 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
3895 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
3896 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
3897 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
3898 case 4:
3899 tmp = gen_muls_i64_i32(a, b);
3900 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 3901 tcg_temp_free_i64(tmp);
ad69471c
PB
3902 break;
3903 case 5:
3904 tmp = gen_mulu_i64_i32(a, b);
3905 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 3906 tcg_temp_free_i64(tmp);
ad69471c
PB
3907 break;
3908 default: abort();
3909 }
c6067f04
CL
3910
3911 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
3912 Don't forget to clean them now. */
3913 if (size < 2) {
7d1b0095
PM
3914 tcg_temp_free_i32(a);
3915 tcg_temp_free_i32(b);
c6067f04 3916 }
ad69471c
PB
3917}
3918
39d5492a
PM
3919static void gen_neon_narrow_op(int op, int u, int size,
3920 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
3921{
3922 if (op) {
3923 if (u) {
3924 gen_neon_unarrow_sats(size, dest, src);
3925 } else {
3926 gen_neon_narrow(size, dest, src);
3927 }
3928 } else {
3929 if (u) {
3930 gen_neon_narrow_satu(size, dest, src);
3931 } else {
3932 gen_neon_narrow_sats(size, dest, src);
3933 }
3934 }
3935}
3936
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Each entry has bit n set if the corresponding 3-reg-same op accepts
 * size value n; any other size combination UNDEFs.  Entries of 0xf
 * use the size field to encode something other than element size.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4008
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Note: values 3 and 29 have no definition here (they
 * are not allocated ops in this table).
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4075
fe8fcf3d
PM
4076static bool neon_2rm_is_v8_op(int op)
4077{
4078 /* Return true if this neon 2reg-misc op is ARMv8 and up */
4079 switch (op) {
4080 case NEON_2RM_VRINTN:
4081 case NEON_2RM_VRINTA:
4082 case NEON_2RM_VRINTM:
4083 case NEON_2RM_VRINTP:
4084 case NEON_2RM_VRINTZ:
4085 case NEON_2RM_VRINTX:
4086 case NEON_2RM_VCVTAU:
4087 case NEON_2RM_VCVTAS:
4088 case NEON_2RM_VCVTNU:
4089 case NEON_2RM_VCVTNS:
4090 case NEON_2RM_VCVTPU:
4091 case NEON_2RM_VCVTPS:
4092 case NEON_2RM_VCVTMU:
4093 case NEON_2RM_VCVTMS:
4094 return true;
4095 default:
4096 return false;
4097 }
4098}
4099
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4168
36a71934
RH
4169
4170/* Expand v8.1 simd helper. */
4171static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
4172 int q, int rd, int rn, int rm)
4173{
962fcbf2 4174 if (dc_isar_feature(aa32_rdm, s)) {
36a71934
RH
4175 int opr_sz = (1 + q) * 8;
4176 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
4177 vfp_reg_offset(1, rn),
4178 vfp_reg_offset(1, rm), cpu_env,
4179 opr_sz, opr_sz, 0, fn);
4180 return 0;
4181 }
4182 return 1;
4183}
4184
/* SSRA (signed shift right and accumulate): d += a >> shift.
 * The _i64 8/16-bit forms operate on lanes packed into one 64-bit value.
 */
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

/* Vector opcodes the fniv expansion above requires. */
static const TCGOpcode vecop_list_ssra[] = {
    INDEX_op_sari_vec, INDEX_op_add_vec, 0
};

/* SSRA expanders indexed by element size (MO_8..MO_64).
 * load_dest is set because the destination is an accumulator.
 */
const GVecGen2i ssra_op[4] = {
    { .fni8 = gen_ssra8_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_8 },
    { .fni8 = gen_ssra16_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_16 },
    { .fni4 = gen_ssra32_i32,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_32 },
    { .fni8 = gen_ssra64_i64,
      .fniv = gen_ssra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_ssra,
      .load_dest = true,
      .vece = MO_64 },
};
4242
/* USRA (unsigned shift right and accumulate): d += a >> shift
 * (logical shift).  Packed-lane _i64 forms as for SSRA above.
 */
static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

/* Vector opcodes the fniv expansion above requires. */
static const TCGOpcode vecop_list_usra[] = {
    INDEX_op_shri_vec, INDEX_op_add_vec, 0
};

/* USRA expanders indexed by element size; load_dest because the
 * destination accumulates.
 */
const GVecGen2i usra_op[4] = {
    { .fni8 = gen_usra8_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_8, },
    { .fni8 = gen_usra16_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_16, },
    { .fni4 = gen_usra32_i32,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_32, },
    { .fni8 = gen_usra64_i64,
      .fniv = gen_usra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_64, },
};
eabcd6fa 4300
/* SRI (shift right and insert): shift 'a' right by 'shift' and insert
 * the result into 'd', preserving the top 'shift' bits of each
 * destination lane.  The 8/16-bit forms mask per packed lane.
 */
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    /* deposit keeps the high 'shift' bits of d untouched */
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    /* sh == 0 is a plain copy; otherwise keep the top sh bits of d */
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
        tcg_gen_shri_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

/* Vector opcodes the fniv expansion above requires. */
static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };

/* SRI expanders indexed by element size; load_dest because the
 * destination is partially preserved.
 */
const GVecGen2i sri_op[4] = {
    { .fni8 = gen_shr8_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_8 },
    { .fni8 = gen_shr16_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_16 },
    { .fni4 = gen_shr32_ins_i32,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_32 },
    { .fni8 = gen_shr64_ins_i64,
      .fniv = gen_shr_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_64 },
};
4380
/* SLI (shift left and insert): shift 'a' left by 'shift' and insert
 * the result into 'd', preserving the low 'shift' bits of each
 * destination lane.
 */
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    /* deposit of a into d above the preserved low 'shift' bits */
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    /* sh == 0 is a plain copy; otherwise keep the low sh bits of d */
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
        tcg_gen_shli_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

/* Vector opcodes the fniv expansion above requires. */
static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };

/* SLI expanders indexed by element size; load_dest because the
 * destination is partially preserved.
 */
const GVecGen2i sli_op[4] = {
    { .fni8 = gen_shl8_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_8 },
    { .fni8 = gen_shl16_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_16 },
    { .fni4 = gen_shl32_ins_i32,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_32 },
    { .fni8 = gen_shl64_ins_i64,
      .fniv = gen_shl_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_64 },
};
4458
/* VMLA/VMLS scalar helpers: d += a * b (mla) or d -= a * b (mls),
 * with packed-lane Neon helpers for the 8/16-bit element sizes.
 */
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}

/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */

static const TCGOpcode vecop_list_mla[] = {
    INDEX_op_mul_vec, INDEX_op_add_vec, 0
};

static const TCGOpcode vecop_list_mls[] = {
    INDEX_op_mul_vec, INDEX_op_sub_vec, 0
};

/* VMLA expanders indexed by element size; load_dest because the
 * destination is an accumulator.
 */
const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_64 },
};

/* VMLS expanders indexed by element size. */
const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_64 },
};
4578
/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    /* setcond produces 0/1; negate to get the 0/all-ones result */
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

/* Non-static: also used by the AArch64 translator. */
void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    /* compare (a & b) != 0 against a zeroed copy of 'a' */
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

/* Vector opcodes the fniv expansion above requires. */
static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };

/* CMTST expanders indexed by element size; the 8/16-bit scalar
 * fallbacks use the packed-lane Neon tst helpers directly.
 */
const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_64 },
};
4622
/* UQADD: unsigned saturating add, also accumulating saturation state.
 * t = usat(a + b); lanes where the saturated result differs from the
 * wrapping sum set bits in 'sat' (the QC flag accumulator).
 */
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

/* Vector opcodes the fniv expansion above requires. */
static const TCGOpcode vecop_list_uqadd[] = {
    INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

/* UQADD expanders indexed by element size; write_aofs because the
 * first extra operand (the QC accumulator) is read-modify-written.
 */
const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_64 },
};
4660
/* SQADD: signed saturating add; saturation state is accumulated into
 * 'sat' exactly as for gen_uqadd_vec above.
 */
static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

/* Vector opcodes the fniv expansion above requires. */
static const TCGOpcode vecop_list_sqadd[] = {
    INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

/* SQADD expanders indexed by element size; write_aofs for the QC
 * accumulator operand.
 */
const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_64 },
};
4698
/* UQSUB: unsigned saturating subtract; saturation state accumulated
 * into 'sat' as for the saturating adds above.
 */
static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

/* Vector opcodes the fniv expansion above requires. */
static const TCGOpcode vecop_list_uqsub[] = {
    INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

/* UQSUB expanders indexed by element size; write_aofs for the QC
 * accumulator operand.
 */
const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_64 },
};
4736
/* SQSUB: signed saturating subtract; saturation state accumulated
 * into 'sat' as for the saturating adds above.
 */
static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

/* Vector opcodes the fniv expansion above requires. */
static const TCGOpcode vecop_list_sqsub[] = {
    INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

/* SQSUB expanders indexed by element size; write_aofs for the QC
 * accumulator operand.
 */
const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_64 },
};
4774
9ee6e8bb
PB
4775/* Translate a NEON data processing instruction. Return nonzero if the
4776 instruction is invalid.
ad69471c
PB
4777 We process data in a mixture of 32-bit and 64-bit chunks.
4778 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4779
7dcc1f89 4780static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4781{
4782 int op;
4783 int q;
eabcd6fa 4784 int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
9ee6e8bb
PB
4785 int size;
4786 int shift;
4787 int pass;
4788 int count;
4789 int pairwise;
4790 int u;
eabcd6fa 4791 int vec_size;
f3cd8218 4792 uint32_t imm;
39d5492a 4793 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
1a66ac61 4794 TCGv_ptr ptr1, ptr2, ptr3;
a7812ae4 4795 TCGv_i64 tmp64;
9ee6e8bb 4796
2c7ffc41
PM
4797 /* FIXME: this access check should not take precedence over UNDEF
4798 * for invalid encodings; we will generate incorrect syndrome information
4799 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4800 */
9dbbc748 4801 if (s->fp_excp_el) {
a767fac8 4802 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
4be42f40 4803 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
4804 return 0;
4805 }
4806
5df8bac1 4807 if (!s->vfp_enabled)
9ee6e8bb
PB
4808 return 1;
4809 q = (insn & (1 << 6)) != 0;
4810 u = (insn >> 24) & 1;
4811 VFP_DREG_D(rd, insn);
4812 VFP_DREG_N(rn, insn);
4813 VFP_DREG_M(rm, insn);
4814 size = (insn >> 20) & 3;
eabcd6fa
RH
4815 vec_size = q ? 16 : 8;
4816 rd_ofs = neon_reg_offset(rd, 0);
4817 rn_ofs = neon_reg_offset(rn, 0);
4818 rm_ofs = neon_reg_offset(rm, 0);
4819
9ee6e8bb
PB
4820 if ((insn & (1 << 23)) == 0) {
4821 /* Three register same length. */
4822 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4823 /* Catch invalid op and bad size combinations: UNDEF */
4824 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4825 return 1;
4826 }
25f84f79
PM
4827 /* All insns of this form UNDEF for either this condition or the
4828 * superset of cases "Q==1"; we catch the latter later.
4829 */
4830 if (q && ((rd | rn | rm) & 1)) {
4831 return 1;
4832 }
36a71934
RH
4833 switch (op) {
4834 case NEON_3R_SHA:
4835 /* The SHA-1/SHA-256 3-register instructions require special
4836 * treatment here, as their size field is overloaded as an
4837 * op type selector, and they all consume their input in a
4838 * single pass.
4839 */
f1ecb913
AB
4840 if (!q) {
4841 return 1;
4842 }
4843 if (!u) { /* SHA-1 */
962fcbf2 4844 if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
4845 return 1;
4846 }
1a66ac61
RH
4847 ptr1 = vfp_reg_ptr(true, rd);
4848 ptr2 = vfp_reg_ptr(true, rn);
4849 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913 4850 tmp4 = tcg_const_i32(size);
1a66ac61 4851 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
f1ecb913
AB
4852 tcg_temp_free_i32(tmp4);
4853 } else { /* SHA-256 */
962fcbf2 4854 if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
f1ecb913
AB
4855 return 1;
4856 }
1a66ac61
RH
4857 ptr1 = vfp_reg_ptr(true, rd);
4858 ptr2 = vfp_reg_ptr(true, rn);
4859 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913
AB
4860 switch (size) {
4861 case 0:
1a66ac61 4862 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
f1ecb913
AB
4863 break;
4864 case 1:
1a66ac61 4865 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
f1ecb913
AB
4866 break;
4867 case 2:
1a66ac61 4868 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
f1ecb913
AB
4869 break;
4870 }
4871 }
1a66ac61
RH
4872 tcg_temp_free_ptr(ptr1);
4873 tcg_temp_free_ptr(ptr2);
4874 tcg_temp_free_ptr(ptr3);
f1ecb913 4875 return 0;
36a71934
RH
4876
4877 case NEON_3R_VPADD_VQRDMLAH:
4878 if (!u) {
4879 break; /* VPADD */
4880 }
4881 /* VQRDMLAH */
4882 switch (size) {
4883 case 1:
4884 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
4885 q, rd, rn, rm);
4886 case 2:
4887 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
4888 q, rd, rn, rm);
4889 }
4890 return 1;
4891
4892 case NEON_3R_VFM_VQRDMLSH:
4893 if (!u) {
4894 /* VFM, VFMS */
4895 if (size == 1) {
4896 return 1;
4897 }
4898 break;
4899 }
4900 /* VQRDMLSH */
4901 switch (size) {
4902 case 1:
4903 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
4904 q, rd, rn, rm);
4905 case 2:
4906 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
4907 q, rd, rn, rm);
4908 }
4909 return 1;
eabcd6fa
RH
4910
4911 case NEON_3R_LOGIC: /* Logic ops. */
4912 switch ((u << 2) | size) {
4913 case 0: /* VAND */
4914 tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
4915 vec_size, vec_size);
4916 break;
4917 case 1: /* VBIC */
4918 tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
4919 vec_size, vec_size);
4920 break;
2900847f
RH
4921 case 2: /* VORR */
4922 tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
4923 vec_size, vec_size);
eabcd6fa
RH
4924 break;
4925 case 3: /* VORN */
4926 tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
4927 vec_size, vec_size);
4928 break;
4929 case 4: /* VEOR */
4930 tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
4931 vec_size, vec_size);
4932 break;
4933 case 5: /* VBSL */
3a7a2b4e
RH
4934 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
4935 vec_size, vec_size);
eabcd6fa
RH
4936 break;
4937 case 6: /* VBIT */
3a7a2b4e
RH
4938 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
4939 vec_size, vec_size);
eabcd6fa
RH
4940 break;
4941 case 7: /* VBIF */
3a7a2b4e
RH
4942 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
4943 vec_size, vec_size);
eabcd6fa
RH
4944 break;
4945 }
4946 return 0;
e4717ae0
RH
4947
4948 case NEON_3R_VADD_VSUB:
4949 if (u) {
4950 tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
4951 vec_size, vec_size);
4952 } else {
4953 tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
4954 vec_size, vec_size);
4955 }
4956 return 0;
82083184 4957
89e68b57
RH
4958 case NEON_3R_VQADD:
4959 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
4960 rn_ofs, rm_ofs, vec_size, vec_size,
4961 (u ? uqadd_op : sqadd_op) + size);
2f143d3a 4962 return 0;
89e68b57
RH
4963
4964 case NEON_3R_VQSUB:
4965 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
4966 rn_ofs, rm_ofs, vec_size, vec_size,
4967 (u ? uqsub_op : sqsub_op) + size);
2f143d3a 4968 return 0;
89e68b57 4969
82083184
RH
4970 case NEON_3R_VMUL: /* VMUL */
4971 if (u) {
4972 /* Polynomial case allows only P8 and is handled below. */
4973 if (size != 0) {
4974 return 1;
4975 }
4976 } else {
4977 tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
4978 vec_size, vec_size);
4979 return 0;
4980 }
4981 break;
4a7832b0
RH
4982
4983 case NEON_3R_VML: /* VMLA, VMLS */
4984 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
4985 u ? &mls_op[size] : &mla_op[size]);
4986 return 0;
ea580fa3
RH
4987
4988 case NEON_3R_VTST_VCEQ:
4989 if (u) { /* VCEQ */
4990 tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
4991 vec_size, vec_size);
4992 } else { /* VTST */
4993 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
4994 vec_size, vec_size, &cmtst_op[size]);
4995 }
4996 return 0;
4997
4998 case NEON_3R_VCGT:
4999 tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
5000 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
5001 return 0;
5002
5003 case NEON_3R_VCGE:
5004 tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
5005 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
5006 return 0;
6f278221
RH
5007
5008 case NEON_3R_VMAX:
5009 if (u) {
5010 tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
5011 vec_size, vec_size);
5012 } else {
5013 tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
5014 vec_size, vec_size);
5015 }
5016 return 0;
5017 case NEON_3R_VMIN:
5018 if (u) {
5019 tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
5020 vec_size, vec_size);
5021 } else {
5022 tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
5023 vec_size, vec_size);
5024 }
5025 return 0;
f1ecb913 5026 }
4a7832b0 5027
eabcd6fa 5028 if (size == 3) {
62698be3 5029 /* 64-bit element instructions. */
9ee6e8bb 5030 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5031 neon_load_reg64(cpu_V0, rn + pass);
5032 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5033 switch (op) {
62698be3 5034 case NEON_3R_VSHL:
ad69471c
PB
5035 if (u) {
5036 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5037 } else {
5038 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5039 }
5040 break;
62698be3 5041 case NEON_3R_VQSHL:
ad69471c 5042 if (u) {
02da0b2d
PM
5043 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5044 cpu_V1, cpu_V0);
ad69471c 5045 } else {
02da0b2d
PM
5046 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5047 cpu_V1, cpu_V0);
ad69471c
PB
5048 }
5049 break;
62698be3 5050 case NEON_3R_VRSHL:
ad69471c
PB
5051 if (u) {
5052 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5053 } else {
ad69471c
PB
5054 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5055 }
5056 break;
62698be3 5057 case NEON_3R_VQRSHL:
ad69471c 5058 if (u) {
02da0b2d
PM
5059 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5060 cpu_V1, cpu_V0);
ad69471c 5061 } else {
02da0b2d
PM
5062 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5063 cpu_V1, cpu_V0);
1e8d4eec 5064 }
9ee6e8bb 5065 break;
9ee6e8bb
PB
5066 default:
5067 abort();
2c0262af 5068 }
ad69471c 5069 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5070 }
9ee6e8bb 5071 return 0;
2c0262af 5072 }
25f84f79 5073 pairwise = 0;
9ee6e8bb 5074 switch (op) {
62698be3
PM
5075 case NEON_3R_VSHL:
5076 case NEON_3R_VQSHL:
5077 case NEON_3R_VRSHL:
5078 case NEON_3R_VQRSHL:
9ee6e8bb 5079 {
ad69471c
PB
5080 int rtmp;
5081 /* Shift instruction operands are reversed. */
5082 rtmp = rn;
9ee6e8bb 5083 rn = rm;
ad69471c 5084 rm = rtmp;
9ee6e8bb 5085 }
2c0262af 5086 break;
36a71934 5087 case NEON_3R_VPADD_VQRDMLAH:
62698be3
PM
5088 case NEON_3R_VPMAX:
5089 case NEON_3R_VPMIN:
9ee6e8bb 5090 pairwise = 1;
2c0262af 5091 break;
25f84f79
PM
5092 case NEON_3R_FLOAT_ARITH:
5093 pairwise = (u && size < 2); /* if VPADD (float) */
5094 break;
5095 case NEON_3R_FLOAT_MINMAX:
5096 pairwise = u; /* if VPMIN/VPMAX (float) */
5097 break;
5098 case NEON_3R_FLOAT_CMP:
5099 if (!u && size) {
5100 /* no encoding for U=0 C=1x */
5101 return 1;
5102 }
5103 break;
5104 case NEON_3R_FLOAT_ACMP:
5105 if (!u) {
5106 return 1;
5107 }
5108 break;
505935fc
WN
5109 case NEON_3R_FLOAT_MISC:
5110 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5111 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5112 return 1;
5113 }
2c0262af 5114 break;
36a71934
RH
5115 case NEON_3R_VFM_VQRDMLSH:
5116 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
5117 return 1;
5118 }
5119 break;
9ee6e8bb 5120 default:
2c0262af 5121 break;
9ee6e8bb 5122 }
dd8fbd78 5123
25f84f79
PM
5124 if (pairwise && q) {
5125 /* All the pairwise insns UNDEF if Q is set */
5126 return 1;
5127 }
5128
9ee6e8bb
PB
5129 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5130
5131 if (pairwise) {
5132 /* Pairwise. */
a5a14945
JR
5133 if (pass < 1) {
5134 tmp = neon_load_reg(rn, 0);
5135 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5136 } else {
a5a14945
JR
5137 tmp = neon_load_reg(rm, 0);
5138 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5139 }
5140 } else {
5141 /* Elementwise. */
dd8fbd78
FN
5142 tmp = neon_load_reg(rn, pass);
5143 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5144 }
5145 switch (op) {
62698be3 5146 case NEON_3R_VHADD:
9ee6e8bb
PB
5147 GEN_NEON_INTEGER_OP(hadd);
5148 break;
62698be3 5149 case NEON_3R_VRHADD:
9ee6e8bb 5150 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5151 break;
62698be3 5152 case NEON_3R_VHSUB:
9ee6e8bb
PB
5153 GEN_NEON_INTEGER_OP(hsub);
5154 break;
62698be3 5155 case NEON_3R_VSHL:
ad69471c 5156 GEN_NEON_INTEGER_OP(shl);
2c0262af 5157 break;
62698be3 5158 case NEON_3R_VQSHL:
02da0b2d 5159 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5160 break;
62698be3 5161 case NEON_3R_VRSHL:
ad69471c 5162 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5163 break;
62698be3 5164 case NEON_3R_VQRSHL:
02da0b2d 5165 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5166 break;
62698be3 5167 case NEON_3R_VABD:
9ee6e8bb
PB
5168 GEN_NEON_INTEGER_OP(abd);
5169 break;
62698be3 5170 case NEON_3R_VABA:
9ee6e8bb 5171 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5172 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5173 tmp2 = neon_load_reg(rd, pass);
5174 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5175 break;
62698be3 5176 case NEON_3R_VMUL:
82083184
RH
5177 /* VMUL.P8; other cases already eliminated. */
5178 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb 5179 break;
62698be3 5180 case NEON_3R_VPMAX:
9ee6e8bb
PB
5181 GEN_NEON_INTEGER_OP(pmax);
5182 break;
62698be3 5183 case NEON_3R_VPMIN:
9ee6e8bb
PB
5184 GEN_NEON_INTEGER_OP(pmin);
5185 break;
62698be3 5186 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5187 if (!u) { /* VQDMULH */
5188 switch (size) {
02da0b2d
PM
5189 case 1:
5190 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5191 break;
5192 case 2:
5193 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5194 break;
62698be3 5195 default: abort();
9ee6e8bb 5196 }
62698be3 5197 } else { /* VQRDMULH */
9ee6e8bb 5198 switch (size) {
02da0b2d
PM
5199 case 1:
5200 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5201 break;
5202 case 2:
5203 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5204 break;
62698be3 5205 default: abort();
9ee6e8bb
PB
5206 }
5207 }
5208 break;
36a71934 5209 case NEON_3R_VPADD_VQRDMLAH:
9ee6e8bb 5210 switch (size) {
dd8fbd78
FN
5211 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5212 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5213 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5214 default: abort();
9ee6e8bb
PB
5215 }
5216 break;
62698be3 5217 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5218 {
5219 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5220 switch ((u << 2) | size) {
5221 case 0: /* VADD */
aa47cfdd
PM
5222 case 4: /* VPADD */
5223 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5224 break;
5225 case 2: /* VSUB */
aa47cfdd 5226 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5227 break;
5228 case 6: /* VABD */
aa47cfdd 5229 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5230 break;
5231 default:
62698be3 5232 abort();
9ee6e8bb 5233 }
aa47cfdd 5234 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5235 break;
aa47cfdd 5236 }
62698be3 5237 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5238 {
5239 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5240 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5241 if (!u) {
7d1b0095 5242 tcg_temp_free_i32(tmp2);
dd8fbd78 5243 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5244 if (size == 0) {
aa47cfdd 5245 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5246 } else {
aa47cfdd 5247 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5248 }
5249 }
aa47cfdd 5250 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5251 break;
aa47cfdd 5252 }
62698be3 5253 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5254 {
5255 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5256 if (!u) {
aa47cfdd 5257 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5258 } else {
aa47cfdd
PM
5259 if (size == 0) {
5260 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5261 } else {
5262 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5263 }
b5ff1b31 5264 }
aa47cfdd 5265 tcg_temp_free_ptr(fpstatus);
2c0262af 5266 break;
aa47cfdd 5267 }
62698be3 5268 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5269 {
5270 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5271 if (size == 0) {
5272 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5273 } else {
5274 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5275 }
5276 tcg_temp_free_ptr(fpstatus);
2c0262af 5277 break;
aa47cfdd 5278 }
62698be3 5279 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5280 {
5281 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5282 if (size == 0) {
f71a2ae5 5283 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5284 } else {
f71a2ae5 5285 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5286 }
5287 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5288 break;
aa47cfdd 5289 }
505935fc
WN
5290 case NEON_3R_FLOAT_MISC:
5291 if (u) {
5292 /* VMAXNM/VMINNM */
5293 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5294 if (size == 0) {
f71a2ae5 5295 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5296 } else {
f71a2ae5 5297 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5298 }
5299 tcg_temp_free_ptr(fpstatus);
5300 } else {
5301 if (size == 0) {
5302 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5303 } else {
5304 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5305 }
5306 }
2c0262af 5307 break;
36a71934 5308 case NEON_3R_VFM_VQRDMLSH:
da97f52c
PM
5309 {
5310 /* VFMA, VFMS: fused multiply-add */
5311 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5312 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5313 if (size) {
5314 /* VFMS */
5315 gen_helper_vfp_negs(tmp, tmp);
5316 }
5317 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5318 tcg_temp_free_i32(tmp3);
5319 tcg_temp_free_ptr(fpstatus);
5320 break;
5321 }
9ee6e8bb
PB
5322 default:
5323 abort();
2c0262af 5324 }
7d1b0095 5325 tcg_temp_free_i32(tmp2);
dd8fbd78 5326
9ee6e8bb
PB
5327 /* Save the result. For elementwise operations we can put it
5328 straight into the destination register. For pairwise operations
5329 we have to be careful to avoid clobbering the source operands. */
5330 if (pairwise && rd == rm) {
dd8fbd78 5331 neon_store_scratch(pass, tmp);
9ee6e8bb 5332 } else {
dd8fbd78 5333 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5334 }
5335
5336 } /* for pass */
5337 if (pairwise && rd == rm) {
5338 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5339 tmp = neon_load_scratch(pass);
5340 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5341 }
5342 }
ad69471c 5343 /* End of 3 register same size operations. */
9ee6e8bb
PB
5344 } else if (insn & (1 << 4)) {
5345 if ((insn & 0x00380080) != 0) {
5346 /* Two registers and shift. */
5347 op = (insn >> 8) & 0xf;
5348 if (insn & (1 << 7)) {
cc13115b
PM
5349 /* 64-bit shift. */
5350 if (op > 7) {
5351 return 1;
5352 }
9ee6e8bb
PB
5353 size = 3;
5354 } else {
5355 size = 2;
5356 while ((insn & (1 << (size + 19))) == 0)
5357 size--;
5358 }
5359 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
9ee6e8bb
PB
5360 if (op < 8) {
5361 /* Shift by immediate:
5362 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5363 if (q && ((rd | rm) & 1)) {
5364 return 1;
5365 }
5366 if (!u && (op == 4 || op == 6)) {
5367 return 1;
5368 }
9ee6e8bb
PB
5369 /* Right shifts are encoded as N - shift, where N is the
5370 element size in bits. */
1dc8425e 5371 if (op <= 4) {
9ee6e8bb 5372 shift = shift - (1 << (size + 3));
1dc8425e
RH
5373 }
5374
5375 switch (op) {
5376 case 0: /* VSHR */
5377 /* Right shift comes here negative. */
5378 shift = -shift;
5379 /* Shifts larger than the element size are architecturally
5380 * valid. Unsigned results in all zeros; signed results
5381 * in all sign bits.
5382 */
5383 if (!u) {
5384 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
5385 MIN(shift, (8 << size) - 1),
5386 vec_size, vec_size);
5387 } else if (shift >= 8 << size) {
5388 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5389 } else {
5390 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
5391 vec_size, vec_size);
5392 }
5393 return 0;
5394
41f6c113
RH
5395 case 1: /* VSRA */
5396 /* Right shift comes here negative. */
5397 shift = -shift;
5398 /* Shifts larger than the element size are architecturally
5399 * valid. Unsigned results in all zeros; signed results
5400 * in all sign bits.
5401 */
5402 if (!u) {
5403 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5404 MIN(shift, (8 << size) - 1),
5405 &ssra_op[size]);
5406 } else if (shift >= 8 << size) {
5407 /* rd += 0 */
5408 } else {
5409 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5410 shift, &usra_op[size]);
5411 }
5412 return 0;
5413
f3cd8218
RH
5414 case 4: /* VSRI */
5415 if (!u) {
5416 return 1;
5417 }
5418 /* Right shift comes here negative. */
5419 shift = -shift;
5420 /* Shift out of range leaves destination unchanged. */
5421 if (shift < 8 << size) {
5422 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5423 shift, &sri_op[size]);
5424 }
5425 return 0;
5426
1dc8425e 5427 case 5: /* VSHL, VSLI */
f3cd8218
RH
5428 if (u) { /* VSLI */
5429 /* Shift out of range leaves destination unchanged. */
5430 if (shift < 8 << size) {
5431 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
5432 vec_size, shift, &sli_op[size]);
5433 }
5434 } else { /* VSHL */
1dc8425e
RH
5435 /* Shifts larger than the element size are
5436 * architecturally valid and results in zero.
5437 */
5438 if (shift >= 8 << size) {
5439 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5440 } else {
5441 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
5442 vec_size, vec_size);
5443 }
1dc8425e 5444 }
f3cd8218 5445 return 0;
1dc8425e
RH
5446 }
5447
9ee6e8bb
PB
5448 if (size == 3) {
5449 count = q + 1;
5450 } else {
5451 count = q ? 4: 2;
5452 }
1dc8425e
RH
5453
5454 /* To avoid excessive duplication of ops we implement shift
5455 * by immediate using the variable shift operations.
5456 */
5457 imm = dup_const(size, shift);
9ee6e8bb
PB
5458
5459 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5460 if (size == 3) {
5461 neon_load_reg64(cpu_V0, rm + pass);
5462 tcg_gen_movi_i64(cpu_V1, imm);
5463 switch (op) {
ad69471c
PB
5464 case 2: /* VRSHR */
5465 case 3: /* VRSRA */
5466 if (u)
5467 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5468 else
ad69471c 5469 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5470 break;
0322b26e 5471 case 6: /* VQSHLU */
02da0b2d
PM
5472 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5473 cpu_V0, cpu_V1);
ad69471c 5474 break;
0322b26e
PM
5475 case 7: /* VQSHL */
5476 if (u) {
02da0b2d 5477 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5478 cpu_V0, cpu_V1);
5479 } else {
02da0b2d 5480 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5481 cpu_V0, cpu_V1);
5482 }
9ee6e8bb 5483 break;
1dc8425e
RH
5484 default:
5485 g_assert_not_reached();
9ee6e8bb 5486 }
41f6c113 5487 if (op == 3) {
ad69471c 5488 /* Accumulate. */
5371cb81 5489 neon_load_reg64(cpu_V1, rd + pass);
ad69471c 5490 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5491 }
5492 neon_store_reg64(cpu_V0, rd + pass);
5493 } else { /* size < 3 */
5494 /* Operands in T0 and T1. */
dd8fbd78 5495 tmp = neon_load_reg(rm, pass);
7d1b0095 5496 tmp2 = tcg_temp_new_i32();
dd8fbd78 5497 tcg_gen_movi_i32(tmp2, imm);
ad69471c 5498 switch (op) {
ad69471c
PB
5499 case 2: /* VRSHR */
5500 case 3: /* VRSRA */
5501 GEN_NEON_INTEGER_OP(rshl);
5502 break;
0322b26e 5503 case 6: /* VQSHLU */
ad69471c 5504 switch (size) {
0322b26e 5505 case 0:
02da0b2d
PM
5506 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5507 tmp, tmp2);
0322b26e
PM
5508 break;
5509 case 1:
02da0b2d
PM
5510 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5511 tmp, tmp2);
0322b26e
PM
5512 break;
5513 case 2:
02da0b2d
PM
5514 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5515 tmp, tmp2);
0322b26e
PM
5516 break;
5517 default:
cc13115b 5518 abort();
ad69471c
PB
5519 }
5520 break;
0322b26e 5521 case 7: /* VQSHL */
02da0b2d 5522 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5523 break;
1dc8425e
RH
5524 default:
5525 g_assert_not_reached();
ad69471c 5526 }
7d1b0095 5527 tcg_temp_free_i32(tmp2);
ad69471c 5528
41f6c113 5529 if (op == 3) {
ad69471c 5530 /* Accumulate. */
dd8fbd78 5531 tmp2 = neon_load_reg(rd, pass);
5371cb81 5532 gen_neon_add(size, tmp, tmp2);
7d1b0095 5533 tcg_temp_free_i32(tmp2);
ad69471c 5534 }
dd8fbd78 5535 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5536 }
5537 } /* for pass */
5538 } else if (op < 10) {
ad69471c 5539 /* Shift by immediate and narrow:
9ee6e8bb 5540 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5541 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5542 if (rm & 1) {
5543 return 1;
5544 }
9ee6e8bb
PB
5545 shift = shift - (1 << (size + 3));
5546 size++;
92cdfaeb 5547 if (size == 3) {
a7812ae4 5548 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5549 neon_load_reg64(cpu_V0, rm);
5550 neon_load_reg64(cpu_V1, rm + 1);
5551 for (pass = 0; pass < 2; pass++) {
5552 TCGv_i64 in;
5553 if (pass == 0) {
5554 in = cpu_V0;
5555 } else {
5556 in = cpu_V1;
5557 }
ad69471c 5558 if (q) {
0b36f4cd 5559 if (input_unsigned) {
92cdfaeb 5560 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5561 } else {
92cdfaeb 5562 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5563 }
ad69471c 5564 } else {
0b36f4cd 5565 if (input_unsigned) {
92cdfaeb 5566 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5567 } else {
92cdfaeb 5568 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5569 }
ad69471c 5570 }
7d1b0095 5571 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5572 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5573 neon_store_reg(rd, pass, tmp);
5574 } /* for pass */
5575 tcg_temp_free_i64(tmp64);
5576 } else {
5577 if (size == 1) {
5578 imm = (uint16_t)shift;
5579 imm |= imm << 16;
2c0262af 5580 } else {
92cdfaeb
PM
5581 /* size == 2 */
5582 imm = (uint32_t)shift;
5583 }
5584 tmp2 = tcg_const_i32(imm);
5585 tmp4 = neon_load_reg(rm + 1, 0);
5586 tmp5 = neon_load_reg(rm + 1, 1);
5587 for (pass = 0; pass < 2; pass++) {
5588 if (pass == 0) {
5589 tmp = neon_load_reg(rm, 0);
5590 } else {
5591 tmp = tmp4;
5592 }
0b36f4cd
CL
5593 gen_neon_shift_narrow(size, tmp, tmp2, q,
5594 input_unsigned);
92cdfaeb
PM
5595 if (pass == 0) {
5596 tmp3 = neon_load_reg(rm, 1);
5597 } else {
5598 tmp3 = tmp5;
5599 }
0b36f4cd
CL
5600 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5601 input_unsigned);
36aa55dc 5602 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5603 tcg_temp_free_i32(tmp);
5604 tcg_temp_free_i32(tmp3);
5605 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5606 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5607 neon_store_reg(rd, pass, tmp);
5608 } /* for pass */
c6067f04 5609 tcg_temp_free_i32(tmp2);
b75263d6 5610 }
9ee6e8bb 5611 } else if (op == 10) {
cc13115b
PM
5612 /* VSHLL, VMOVL */
5613 if (q || (rd & 1)) {
9ee6e8bb 5614 return 1;
cc13115b 5615 }
ad69471c
PB
5616 tmp = neon_load_reg(rm, 0);
5617 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5618 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5619 if (pass == 1)
5620 tmp = tmp2;
5621
5622 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5623
9ee6e8bb
PB
5624 if (shift != 0) {
5625 /* The shift is less than the width of the source
ad69471c
PB
5626 type, so we can just shift the whole register. */
5627 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5628 /* Widen the result of shift: we need to clear
5629 * the potential overflow bits resulting from
5630 * left bits of the narrow input appearing as
5631 * right bits of left the neighbour narrow
5632 * input. */
ad69471c
PB
5633 if (size < 2 || !u) {
5634 uint64_t imm64;
5635 if (size == 0) {
5636 imm = (0xffu >> (8 - shift));
5637 imm |= imm << 16;
acdf01ef 5638 } else if (size == 1) {
ad69471c 5639 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5640 } else {
5641 /* size == 2 */
5642 imm = 0xffffffff >> (32 - shift);
5643 }
5644 if (size < 2) {
5645 imm64 = imm | (((uint64_t)imm) << 32);
5646 } else {
5647 imm64 = imm;
9ee6e8bb 5648 }
acdf01ef 5649 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5650 }
5651 }
ad69471c 5652 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5653 }
f73534a5 5654 } else if (op >= 14) {
9ee6e8bb 5655 /* VCVT fixed-point. */
c253dd78
PM
5656 TCGv_ptr fpst;
5657 TCGv_i32 shiftv;
5658 VFPGenFixPointFn *fn;
5659
cc13115b
PM
5660 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5661 return 1;
5662 }
c253dd78
PM
5663
5664 if (!(op & 1)) {
5665 if (u) {
5666 fn = gen_helper_vfp_ultos;
5667 } else {
5668 fn = gen_helper_vfp_sltos;
5669 }
5670 } else {
5671 if (u) {
5672 fn = gen_helper_vfp_touls_round_to_zero;
5673 } else {
5674 fn = gen_helper_vfp_tosls_round_to_zero;
5675 }
5676 }
5677
f73534a5
PM
5678 /* We have already masked out the must-be-1 top bit of imm6,
5679 * hence this 32-shift where the ARM ARM has 64-imm6.
5680 */
5681 shift = 32 - shift;
c253dd78
PM
5682 fpst = get_fpstatus_ptr(1);
5683 shiftv = tcg_const_i32(shift);
9ee6e8bb 5684 for (pass = 0; pass < (q ? 4 : 2); pass++) {
c253dd78
PM
5685 TCGv_i32 tmpf = neon_load_reg(rm, pass);
5686 fn(tmpf, tmpf, shiftv, fpst);
5687 neon_store_reg(rd, pass, tmpf);
2c0262af 5688 }
c253dd78
PM
5689 tcg_temp_free_ptr(fpst);
5690 tcg_temp_free_i32(shiftv);
2c0262af 5691 } else {
9ee6e8bb
PB
5692 return 1;
5693 }
5694 } else { /* (insn & 0x00380080) == 0 */
246fa4ac
RH
5695 int invert, reg_ofs, vec_size;
5696
7d80fee5
PM
5697 if (q && (rd & 1)) {
5698 return 1;
5699 }
9ee6e8bb
PB
5700
5701 op = (insn >> 8) & 0xf;
5702 /* One register and immediate. */
5703 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5704 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5705 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5706 * We choose to not special-case this and will behave as if a
5707 * valid constant encoding of 0 had been given.
5708 */
9ee6e8bb
PB
5709 switch (op) {
5710 case 0: case 1:
5711 /* no-op */
5712 break;
5713 case 2: case 3:
5714 imm <<= 8;
5715 break;
5716 case 4: case 5:
5717 imm <<= 16;
5718 break;
5719 case 6: case 7:
5720 imm <<= 24;
5721 break;
5722 case 8: case 9:
5723 imm |= imm << 16;
5724 break;
5725 case 10: case 11:
5726 imm = (imm << 8) | (imm << 24);
5727 break;
5728 case 12:
8e31209e 5729 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5730 break;
5731 case 13:
5732 imm = (imm << 16) | 0xffff;
5733 break;
5734 case 14:
5735 imm |= (imm << 8) | (imm << 16) | (imm << 24);
246fa4ac 5736 if (invert) {
9ee6e8bb 5737 imm = ~imm;
246fa4ac 5738 }
9ee6e8bb
PB
5739 break;
5740 case 15:
7d80fee5
PM
5741 if (invert) {
5742 return 1;
5743 }
9ee6e8bb
PB
5744 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5745 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5746 break;
5747 }
246fa4ac 5748 if (invert) {
9ee6e8bb 5749 imm = ~imm;
246fa4ac 5750 }
9ee6e8bb 5751
246fa4ac
RH
5752 reg_ofs = neon_reg_offset(rd, 0);
5753 vec_size = q ? 16 : 8;
5754
5755 if (op & 1 && op < 12) {
5756 if (invert) {
5757 /* The immediate value has already been inverted,
5758 * so BIC becomes AND.
5759 */
5760 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
5761 vec_size, vec_size);
9ee6e8bb 5762 } else {
246fa4ac
RH
5763 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
5764 vec_size, vec_size);
5765 }
5766 } else {
5767 /* VMOV, VMVN. */
5768 if (op == 14 && invert) {
5769 TCGv_i64 t64 = tcg_temp_new_i64();
5770
5771 for (pass = 0; pass <= q; ++pass) {
5772 uint64_t val = 0;
a5a14945 5773 int n;
246fa4ac
RH
5774
5775 for (n = 0; n < 8; n++) {
5776 if (imm & (1 << (n + pass * 8))) {
5777 val |= 0xffull << (n * 8);
5778 }
9ee6e8bb 5779 }
246fa4ac
RH
5780 tcg_gen_movi_i64(t64, val);
5781 neon_store_reg64(t64, rd + pass);
9ee6e8bb 5782 }
246fa4ac
RH
5783 tcg_temp_free_i64(t64);
5784 } else {
5785 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
9ee6e8bb
PB
5786 }
5787 }
5788 }
e4b3861d 5789 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5790 if (size != 3) {
5791 op = (insn >> 8) & 0xf;
5792 if ((insn & (1 << 6)) == 0) {
5793 /* Three registers of different lengths. */
5794 int src1_wide;
5795 int src2_wide;
5796 int prewiden;
526d0096
PM
5797 /* undefreq: bit 0 : UNDEF if size == 0
5798 * bit 1 : UNDEF if size == 1
5799 * bit 2 : UNDEF if size == 2
5800 * bit 3 : UNDEF if U == 1
5801 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
5802 */
5803 int undefreq;
5804 /* prewiden, src1_wide, src2_wide, undefreq */
5805 static const int neon_3reg_wide[16][4] = {
5806 {1, 0, 0, 0}, /* VADDL */
5807 {1, 1, 0, 0}, /* VADDW */
5808 {1, 0, 0, 0}, /* VSUBL */
5809 {1, 1, 0, 0}, /* VSUBW */
5810 {0, 1, 1, 0}, /* VADDHN */
5811 {0, 0, 0, 0}, /* VABAL */
5812 {0, 1, 1, 0}, /* VSUBHN */
5813 {0, 0, 0, 0}, /* VABDL */
5814 {0, 0, 0, 0}, /* VMLAL */
526d0096 5815 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 5816 {0, 0, 0, 0}, /* VMLSL */
526d0096 5817 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 5818 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 5819 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 5820 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 5821 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5822 };
5823
5824 prewiden = neon_3reg_wide[op][0];
5825 src1_wide = neon_3reg_wide[op][1];
5826 src2_wide = neon_3reg_wide[op][2];
695272dc 5827 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5828
526d0096
PM
5829 if ((undefreq & (1 << size)) ||
5830 ((undefreq & 8) && u)) {
695272dc
PM
5831 return 1;
5832 }
5833 if ((src1_wide && (rn & 1)) ||
5834 (src2_wide && (rm & 1)) ||
5835 (!src2_wide && (rd & 1))) {
ad69471c 5836 return 1;
695272dc 5837 }
ad69471c 5838
4e624eda
PM
5839 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
5840 * outside the loop below as it only performs a single pass.
5841 */
5842 if (op == 14 && size == 2) {
5843 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
5844
962fcbf2 5845 if (!dc_isar_feature(aa32_pmull, s)) {
4e624eda
PM
5846 return 1;
5847 }
5848 tcg_rn = tcg_temp_new_i64();
5849 tcg_rm = tcg_temp_new_i64();
5850 tcg_rd = tcg_temp_new_i64();
5851 neon_load_reg64(tcg_rn, rn);
5852 neon_load_reg64(tcg_rm, rm);
5853 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
5854 neon_store_reg64(tcg_rd, rd);
5855 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
5856 neon_store_reg64(tcg_rd, rd + 1);
5857 tcg_temp_free_i64(tcg_rn);
5858 tcg_temp_free_i64(tcg_rm);
5859 tcg_temp_free_i64(tcg_rd);
5860 return 0;
5861 }
5862
9ee6e8bb
PB
5863 /* Avoid overlapping operands. Wide source operands are
5864 always aligned so will never overlap with wide
5865 destinations in problematic ways. */
8f8e3aa4 5866 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5867 tmp = neon_load_reg(rm, 1);
5868 neon_store_scratch(2, tmp);
8f8e3aa4 5869 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5870 tmp = neon_load_reg(rn, 1);
5871 neon_store_scratch(2, tmp);
9ee6e8bb 5872 }
f764718d 5873 tmp3 = NULL;
9ee6e8bb 5874 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5875 if (src1_wide) {
5876 neon_load_reg64(cpu_V0, rn + pass);
f764718d 5877 tmp = NULL;
9ee6e8bb 5878 } else {
ad69471c 5879 if (pass == 1 && rd == rn) {
dd8fbd78 5880 tmp = neon_load_scratch(2);
9ee6e8bb 5881 } else {
ad69471c
PB
5882 tmp = neon_load_reg(rn, pass);
5883 }
5884 if (prewiden) {
5885 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5886 }
5887 }
ad69471c
PB
5888 if (src2_wide) {
5889 neon_load_reg64(cpu_V1, rm + pass);
f764718d 5890 tmp2 = NULL;
9ee6e8bb 5891 } else {
ad69471c 5892 if (pass == 1 && rd == rm) {
dd8fbd78 5893 tmp2 = neon_load_scratch(2);
9ee6e8bb 5894 } else {
ad69471c
PB
5895 tmp2 = neon_load_reg(rm, pass);
5896 }
5897 if (prewiden) {
5898 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5899 }
9ee6e8bb
PB
5900 }
5901 switch (op) {
5902 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5903 gen_neon_addl(size);
9ee6e8bb 5904 break;
79b0e534 5905 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5906 gen_neon_subl(size);
9ee6e8bb
PB
5907 break;
5908 case 5: case 7: /* VABAL, VABDL */
5909 switch ((size << 1) | u) {
ad69471c
PB
5910 case 0:
5911 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5912 break;
5913 case 1:
5914 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5915 break;
5916 case 2:
5917 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5918 break;
5919 case 3:
5920 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5921 break;
5922 case 4:
5923 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5924 break;
5925 case 5:
5926 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5927 break;
9ee6e8bb
PB
5928 default: abort();
5929 }
7d1b0095
PM
5930 tcg_temp_free_i32(tmp2);
5931 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5932 break;
5933 case 8: case 9: case 10: case 11: case 12: case 13:
5934 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5935 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5936 break;
5937 case 14: /* Polynomial VMULL */
e5ca24cb 5938 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5939 tcg_temp_free_i32(tmp2);
5940 tcg_temp_free_i32(tmp);
e5ca24cb 5941 break;
695272dc
PM
5942 default: /* 15 is RESERVED: caught earlier */
5943 abort();
9ee6e8bb 5944 }
ebcd88ce
PM
5945 if (op == 13) {
5946 /* VQDMULL */
5947 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5948 neon_store_reg64(cpu_V0, rd + pass);
5949 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5950 /* Accumulate. */
ebcd88ce 5951 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5952 switch (op) {
4dc064e6
PM
5953 case 10: /* VMLSL */
5954 gen_neon_negl(cpu_V0, size);
5955 /* Fall through */
5956 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5957 gen_neon_addl(size);
9ee6e8bb
PB
5958 break;
5959 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5960 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5961 if (op == 11) {
5962 gen_neon_negl(cpu_V0, size);
5963 }
ad69471c
PB
5964 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5965 break;
9ee6e8bb
PB
5966 default:
5967 abort();
5968 }
ad69471c 5969 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5970 } else if (op == 4 || op == 6) {
5971 /* Narrowing operation. */
7d1b0095 5972 tmp = tcg_temp_new_i32();
79b0e534 5973 if (!u) {
9ee6e8bb 5974 switch (size) {
ad69471c
PB
5975 case 0:
5976 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5977 break;
5978 case 1:
5979 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5980 break;
5981 case 2:
664b7e3b 5982 tcg_gen_extrh_i64_i32(tmp, cpu_V0);
ad69471c 5983 break;
9ee6e8bb
PB
5984 default: abort();
5985 }
5986 } else {
5987 switch (size) {
ad69471c
PB
5988 case 0:
5989 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5990 break;
5991 case 1:
5992 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5993 break;
5994 case 2:
5995 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
664b7e3b 5996 tcg_gen_extrh_i64_i32(tmp, cpu_V0);
ad69471c 5997 break;
9ee6e8bb
PB
5998 default: abort();
5999 }
6000 }
ad69471c
PB
6001 if (pass == 0) {
6002 tmp3 = tmp;
6003 } else {
6004 neon_store_reg(rd, 0, tmp3);
6005 neon_store_reg(rd, 1, tmp);
6006 }
9ee6e8bb
PB
6007 } else {
6008 /* Write back the result. */
ad69471c 6009 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6010 }
6011 }
6012 } else {
3e3326df
PM
6013 /* Two registers and a scalar. NB that for ops of this form
6014 * the ARM ARM labels bit 24 as Q, but it is in our variable
6015 * 'u', not 'q'.
6016 */
6017 if (size == 0) {
6018 return 1;
6019 }
9ee6e8bb 6020 switch (op) {
9ee6e8bb 6021 case 1: /* Float VMLA scalar */
9ee6e8bb 6022 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6023 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6024 if (size == 1) {
6025 return 1;
6026 }
6027 /* fall through */
6028 case 0: /* Integer VMLA scalar */
6029 case 4: /* Integer VMLS scalar */
6030 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6031 case 12: /* VQDMULH scalar */
6032 case 13: /* VQRDMULH scalar */
3e3326df
PM
6033 if (u && ((rd | rn) & 1)) {
6034 return 1;
6035 }
dd8fbd78
FN
6036 tmp = neon_get_scalar(size, rm);
6037 neon_store_scratch(0, tmp);
9ee6e8bb 6038 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6039 tmp = neon_load_scratch(0);
6040 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6041 if (op == 12) {
6042 if (size == 1) {
02da0b2d 6043 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6044 } else {
02da0b2d 6045 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6046 }
6047 } else if (op == 13) {
6048 if (size == 1) {
02da0b2d 6049 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6050 } else {
02da0b2d 6051 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6052 }
6053 } else if (op & 1) {
aa47cfdd
PM
6054 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6055 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6056 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6057 } else {
6058 switch (size) {
dd8fbd78
FN
6059 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6060 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6061 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6062 default: abort();
9ee6e8bb
PB
6063 }
6064 }
7d1b0095 6065 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6066 if (op < 8) {
6067 /* Accumulate. */
dd8fbd78 6068 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6069 switch (op) {
6070 case 0:
dd8fbd78 6071 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6072 break;
6073 case 1:
aa47cfdd
PM
6074 {
6075 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6076 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6077 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6078 break;
aa47cfdd 6079 }
9ee6e8bb 6080 case 4:
dd8fbd78 6081 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6082 break;
6083 case 5:
aa47cfdd
PM
6084 {
6085 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6086 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6087 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6088 break;
aa47cfdd 6089 }
9ee6e8bb
PB
6090 default:
6091 abort();
6092 }
7d1b0095 6093 tcg_temp_free_i32(tmp2);
9ee6e8bb 6094 }
dd8fbd78 6095 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6096 }
6097 break;
9ee6e8bb 6098 case 3: /* VQDMLAL scalar */
9ee6e8bb 6099 case 7: /* VQDMLSL scalar */
9ee6e8bb 6100 case 11: /* VQDMULL scalar */
3e3326df 6101 if (u == 1) {
ad69471c 6102 return 1;
3e3326df
PM
6103 }
6104 /* fall through */
6105 case 2: /* VMLAL sclar */
6106 case 6: /* VMLSL scalar */
6107 case 10: /* VMULL scalar */
6108 if (rd & 1) {
6109 return 1;
6110 }
dd8fbd78 6111 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6112 /* We need a copy of tmp2 because gen_neon_mull
6113 * deletes it during pass 0. */
7d1b0095 6114 tmp4 = tcg_temp_new_i32();
c6067f04 6115 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6116 tmp3 = neon_load_reg(rn, 1);
ad69471c 6117
9ee6e8bb 6118 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6119 if (pass == 0) {
6120 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6121 } else {
dd8fbd78 6122 tmp = tmp3;
c6067f04 6123 tmp2 = tmp4;
9ee6e8bb 6124 }
ad69471c 6125 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6126 if (op != 11) {
6127 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6128 }
9ee6e8bb 6129 switch (op) {
4dc064e6
PM
6130 case 6:
6131 gen_neon_negl(cpu_V0, size);
6132 /* Fall through */
6133 case 2:
ad69471c 6134 gen_neon_addl(size);
9ee6e8bb
PB
6135 break;
6136 case 3: case 7:
ad69471c 6137 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6138 if (op == 7) {
6139 gen_neon_negl(cpu_V0, size);
6140 }
ad69471c 6141 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6142 break;
6143 case 10:
6144 /* no-op */
6145 break;
6146 case 11:
ad69471c 6147 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6148 break;
6149 default:
6150 abort();
6151 }
ad69471c 6152 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6153 }
61adacc8
RH
6154 break;
6155 case 14: /* VQRDMLAH scalar */
6156 case 15: /* VQRDMLSH scalar */
6157 {
6158 NeonGenThreeOpEnvFn *fn;
dd8fbd78 6159
962fcbf2 6160 if (!dc_isar_feature(aa32_rdm, s)) {
61adacc8
RH
6161 return 1;
6162 }
6163 if (u && ((rd | rn) & 1)) {
6164 return 1;
6165 }
6166 if (op == 14) {
6167 if (size == 1) {
6168 fn = gen_helper_neon_qrdmlah_s16;
6169 } else {
6170 fn = gen_helper_neon_qrdmlah_s32;
6171 }
6172 } else {
6173 if (size == 1) {
6174 fn = gen_helper_neon_qrdmlsh_s16;
6175 } else {
6176 fn = gen_helper_neon_qrdmlsh_s32;
6177 }
6178 }
dd8fbd78 6179
61adacc8
RH
6180 tmp2 = neon_get_scalar(size, rm);
6181 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6182 tmp = neon_load_reg(rn, pass);
6183 tmp3 = neon_load_reg(rd, pass);
6184 fn(tmp, cpu_env, tmp, tmp2, tmp3);
6185 tcg_temp_free_i32(tmp3);
6186 neon_store_reg(rd, pass, tmp);
6187 }
6188 tcg_temp_free_i32(tmp2);
6189 }
9ee6e8bb 6190 break;
61adacc8
RH
6191 default:
6192 g_assert_not_reached();
9ee6e8bb
PB
6193 }
6194 }
6195 } else { /* size == 3 */
6196 if (!u) {
6197 /* Extract. */
9ee6e8bb 6198 imm = (insn >> 8) & 0xf;
ad69471c
PB
6199
6200 if (imm > 7 && !q)
6201 return 1;
6202
52579ea1
PM
6203 if (q && ((rd | rn | rm) & 1)) {
6204 return 1;
6205 }
6206
ad69471c
PB
6207 if (imm == 0) {
6208 neon_load_reg64(cpu_V0, rn);
6209 if (q) {
6210 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6211 }
ad69471c
PB
6212 } else if (imm == 8) {
6213 neon_load_reg64(cpu_V0, rn + 1);
6214 if (q) {
6215 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6216 }
ad69471c 6217 } else if (q) {
a7812ae4 6218 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6219 if (imm < 8) {
6220 neon_load_reg64(cpu_V0, rn);
a7812ae4 6221 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6222 } else {
6223 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6224 neon_load_reg64(tmp64, rm);
ad69471c
PB
6225 }
6226 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6227 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6228 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6229 if (imm < 8) {
6230 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6231 } else {
ad69471c
PB
6232 neon_load_reg64(cpu_V1, rm + 1);
6233 imm -= 8;
9ee6e8bb 6234 }
ad69471c 6235 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6236 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6237 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6238 tcg_temp_free_i64(tmp64);
ad69471c 6239 } else {
a7812ae4 6240 /* BUGFIX */
ad69471c 6241 neon_load_reg64(cpu_V0, rn);
a7812ae4 6242 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6243 neon_load_reg64(cpu_V1, rm);
a7812ae4 6244 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6245 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6246 }
6247 neon_store_reg64(cpu_V0, rd);
6248 if (q) {
6249 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6250 }
6251 } else if ((insn & (1 << 11)) == 0) {
6252 /* Two register misc. */
6253 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6254 size = (insn >> 18) & 3;
600b828c
PM
6255 /* UNDEF for unknown op values and bad op-size combinations */
6256 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6257 return 1;
6258 }
fe8fcf3d
PM
6259 if (neon_2rm_is_v8_op(op) &&
6260 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6261 return 1;
6262 }
fc2a9b37
PM
6263 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6264 q && ((rm | rd) & 1)) {
6265 return 1;
6266 }
9ee6e8bb 6267 switch (op) {
600b828c 6268 case NEON_2RM_VREV64:
9ee6e8bb 6269 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6270 tmp = neon_load_reg(rm, pass * 2);
6271 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6272 switch (size) {
dd8fbd78
FN
6273 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6274 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6275 case 2: /* no-op */ break;
6276 default: abort();
6277 }
dd8fbd78 6278 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6279 if (size == 2) {
dd8fbd78 6280 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6281 } else {
9ee6e8bb 6282 switch (size) {
dd8fbd78
FN
6283 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6284 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6285 default: abort();
6286 }
dd8fbd78 6287 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6288 }
6289 }
6290 break;
600b828c
PM
6291 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6292 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6293 for (pass = 0; pass < q + 1; pass++) {
6294 tmp = neon_load_reg(rm, pass * 2);
6295 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6296 tmp = neon_load_reg(rm, pass * 2 + 1);
6297 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6298 switch (size) {
6299 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6300 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6301 case 2: tcg_gen_add_i64(CPU_V001); break;
6302 default: abort();
6303 }
600b828c 6304 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6305 /* Accumulate. */
ad69471c
PB
6306 neon_load_reg64(cpu_V1, rd + pass);
6307 gen_neon_addl(size);
9ee6e8bb 6308 }
ad69471c 6309 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6310 }
6311 break;
600b828c 6312 case NEON_2RM_VTRN:
9ee6e8bb 6313 if (size == 2) {
a5a14945 6314 int n;
9ee6e8bb 6315 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6316 tmp = neon_load_reg(rm, n);
6317 tmp2 = neon_load_reg(rd, n + 1);
6318 neon_store_reg(rm, n, tmp2);
6319 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6320 }
6321 } else {
6322 goto elementwise;
6323 }
6324 break;
600b828c 6325 case NEON_2RM_VUZP:
02acedf9 6326 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6327 return 1;
9ee6e8bb
PB
6328 }
6329 break;
600b828c 6330 case NEON_2RM_VZIP:
d68a6f3a 6331 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6332 return 1;
9ee6e8bb
PB
6333 }
6334 break;
600b828c
PM
6335 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6336 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6337 if (rm & 1) {
6338 return 1;
6339 }
f764718d 6340 tmp2 = NULL;
9ee6e8bb 6341 for (pass = 0; pass < 2; pass++) {
ad69471c 6342 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6343 tmp = tcg_temp_new_i32();
600b828c
PM
6344 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6345 tmp, cpu_V0);
ad69471c
PB
6346 if (pass == 0) {
6347 tmp2 = tmp;
6348 } else {
6349 neon_store_reg(rd, 0, tmp2);
6350 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6351 }
9ee6e8bb
PB
6352 }
6353 break;
600b828c 6354 case NEON_2RM_VSHLL:
fc2a9b37 6355 if (q || (rd & 1)) {
9ee6e8bb 6356 return 1;
600b828c 6357 }
ad69471c
PB
6358 tmp = neon_load_reg(rm, 0);
6359 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6360 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6361 if (pass == 1)
6362 tmp = tmp2;
6363 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6364 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6365 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6366 }
6367 break;
600b828c 6368 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
6369 {
6370 TCGv_ptr fpst;
6371 TCGv_i32 ahp;
6372
602f6e42 6373 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
6374 q || (rm & 1)) {
6375 return 1;
6376 }
486624fc
AB
6377 fpst = get_fpstatus_ptr(true);
6378 ahp = get_ahp_flag();
58f2682e
PM
6379 tmp = neon_load_reg(rm, 0);
6380 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6381 tmp2 = neon_load_reg(rm, 1);
6382 gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
60011498
PB
6383 tcg_gen_shli_i32(tmp2, tmp2, 16);
6384 tcg_gen_or_i32(tmp2, tmp2, tmp);
58f2682e
PM
6385 tcg_temp_free_i32(tmp);
6386 tmp = neon_load_reg(rm, 2);
6387 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6388 tmp3 = neon_load_reg(rm, 3);
60011498 6389 neon_store_reg(rd, 0, tmp2);
58f2682e
PM
6390 gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
6391 tcg_gen_shli_i32(tmp3, tmp3, 16);
6392 tcg_gen_or_i32(tmp3, tmp3, tmp);
6393 neon_store_reg(rd, 1, tmp3);
7d1b0095 6394 tcg_temp_free_i32(tmp);
486624fc
AB
6395 tcg_temp_free_i32(ahp);
6396 tcg_temp_free_ptr(fpst);
60011498 6397 break;
486624fc 6398 }
600b828c 6399 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
6400 {
6401 TCGv_ptr fpst;
6402 TCGv_i32 ahp;
602f6e42 6403 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
6404 q || (rd & 1)) {
6405 return 1;
6406 }
486624fc
AB
6407 fpst = get_fpstatus_ptr(true);
6408 ahp = get_ahp_flag();
7d1b0095 6409 tmp3 = tcg_temp_new_i32();
60011498
PB
6410 tmp = neon_load_reg(rm, 0);
6411 tmp2 = neon_load_reg(rm, 1);
6412 tcg_gen_ext16u_i32(tmp3, tmp);
b66f6b99
PM
6413 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6414 neon_store_reg(rd, 0, tmp3);
6415 tcg_gen_shri_i32(tmp, tmp, 16);
6416 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
6417 neon_store_reg(rd, 1, tmp);
6418 tmp3 = tcg_temp_new_i32();
60011498 6419 tcg_gen_ext16u_i32(tmp3, tmp2);
b66f6b99
PM
6420 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6421 neon_store_reg(rd, 2, tmp3);
6422 tcg_gen_shri_i32(tmp2, tmp2, 16);
6423 gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
6424 neon_store_reg(rd, 3, tmp2);
486624fc
AB
6425 tcg_temp_free_i32(ahp);
6426 tcg_temp_free_ptr(fpst);
60011498 6427 break;
486624fc 6428 }
9d935509 6429 case NEON_2RM_AESE: case NEON_2RM_AESMC:
962fcbf2 6430 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
9d935509
AB
6431 return 1;
6432 }
1a66ac61
RH
6433 ptr1 = vfp_reg_ptr(true, rd);
6434 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
6435
6436 /* Bit 6 is the lowest opcode bit; it distinguishes between
6437 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6438 */
6439 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6440
6441 if (op == NEON_2RM_AESE) {
1a66ac61 6442 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 6443 } else {
1a66ac61 6444 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 6445 }
1a66ac61
RH
6446 tcg_temp_free_ptr(ptr1);
6447 tcg_temp_free_ptr(ptr2);
9d935509
AB
6448 tcg_temp_free_i32(tmp3);
6449 break;
f1ecb913 6450 case NEON_2RM_SHA1H:
962fcbf2 6451 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
f1ecb913
AB
6452 return 1;
6453 }
1a66ac61
RH
6454 ptr1 = vfp_reg_ptr(true, rd);
6455 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 6456
1a66ac61 6457 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 6458
1a66ac61
RH
6459 tcg_temp_free_ptr(ptr1);
6460 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
6461 break;
6462 case NEON_2RM_SHA1SU1:
6463 if ((rm | rd) & 1) {
6464 return 1;
6465 }
6466 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6467 if (q) {
962fcbf2 6468 if (!dc_isar_feature(aa32_sha2, s)) {
f1ecb913
AB
6469 return 1;
6470 }
962fcbf2 6471 } else if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
6472 return 1;
6473 }
1a66ac61
RH
6474 ptr1 = vfp_reg_ptr(true, rd);
6475 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 6476 if (q) {
1a66ac61 6477 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 6478 } else {
1a66ac61 6479 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 6480 }
1a66ac61
RH
6481 tcg_temp_free_ptr(ptr1);
6482 tcg_temp_free_ptr(ptr2);
f1ecb913 6483 break;
4bf940be
RH
6484
6485 case NEON_2RM_VMVN:
6486 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
6487 break;
6488 case NEON_2RM_VNEG:
6489 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
6490 break;
4e027a71
RH
6491 case NEON_2RM_VABS:
6492 tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
6493 break;
4bf940be 6494
9ee6e8bb
PB
6495 default:
6496 elementwise:
6497 for (pass = 0; pass < (q ? 4 : 2); pass++) {
60737ed5 6498 tmp = neon_load_reg(rm, pass);
9ee6e8bb 6499 switch (op) {
600b828c 6500 case NEON_2RM_VREV32:
9ee6e8bb 6501 switch (size) {
dd8fbd78
FN
6502 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6503 case 1: gen_swap_half(tmp); break;
600b828c 6504 default: abort();
9ee6e8bb
PB
6505 }
6506 break;
600b828c 6507 case NEON_2RM_VREV16:
dd8fbd78 6508 gen_rev16(tmp);
9ee6e8bb 6509 break;
600b828c 6510 case NEON_2RM_VCLS:
9ee6e8bb 6511 switch (size) {
dd8fbd78
FN
6512 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6513 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6514 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6515 default: abort();
9ee6e8bb
PB
6516 }
6517 break;
600b828c 6518 case NEON_2RM_VCLZ:
9ee6e8bb 6519 switch (size) {
dd8fbd78
FN
6520 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6521 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 6522 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 6523 default: abort();
9ee6e8bb
PB
6524 }
6525 break;
600b828c 6526 case NEON_2RM_VCNT:
dd8fbd78 6527 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6528 break;
600b828c 6529 case NEON_2RM_VQABS:
9ee6e8bb 6530 switch (size) {
02da0b2d
PM
6531 case 0:
6532 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6533 break;
6534 case 1:
6535 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6536 break;
6537 case 2:
6538 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6539 break;
600b828c 6540 default: abort();
9ee6e8bb
PB
6541 }
6542 break;
600b828c 6543 case NEON_2RM_VQNEG:
9ee6e8bb 6544 switch (size) {
02da0b2d
PM
6545 case 0:
6546 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6547 break;
6548 case 1:
6549 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6550 break;
6551 case 2:
6552 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6553 break;
600b828c 6554 default: abort();
9ee6e8bb
PB
6555 }
6556 break;
600b828c 6557 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6558 tmp2 = tcg_const_i32(0);
9ee6e8bb 6559 switch(size) {
dd8fbd78
FN
6560 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6561 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6562 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6563 default: abort();
9ee6e8bb 6564 }
39d5492a 6565 tcg_temp_free_i32(tmp2);
600b828c 6566 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6567 tcg_gen_not_i32(tmp, tmp);
600b828c 6568 }
9ee6e8bb 6569 break;
600b828c 6570 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6571 tmp2 = tcg_const_i32(0);
9ee6e8bb 6572 switch(size) {
dd8fbd78
FN
6573 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6574 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6575 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6576 default: abort();
9ee6e8bb 6577 }
39d5492a 6578 tcg_temp_free_i32(tmp2);
600b828c 6579 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6580 tcg_gen_not_i32(tmp, tmp);
600b828c 6581 }
9ee6e8bb 6582 break;
600b828c 6583 case NEON_2RM_VCEQ0:
dd8fbd78 6584 tmp2 = tcg_const_i32(0);
9ee6e8bb 6585 switch(size) {
dd8fbd78
FN
6586 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6587 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6588 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6589 default: abort();
9ee6e8bb 6590 }
39d5492a 6591 tcg_temp_free_i32(tmp2);
9ee6e8bb 6592 break;
600b828c 6593 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6594 {
6595 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6596 tmp2 = tcg_const_i32(0);
aa47cfdd 6597 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6598 tcg_temp_free_i32(tmp2);
aa47cfdd 6599 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6600 break;
aa47cfdd 6601 }
600b828c 6602 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6603 {
6604 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6605 tmp2 = tcg_const_i32(0);
aa47cfdd 6606 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6607 tcg_temp_free_i32(tmp2);
aa47cfdd 6608 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6609 break;
aa47cfdd 6610 }
600b828c 6611 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6612 {
6613 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6614 tmp2 = tcg_const_i32(0);
aa47cfdd 6615 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6616 tcg_temp_free_i32(tmp2);
aa47cfdd 6617 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6618 break;
aa47cfdd 6619 }
600b828c 6620 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6621 {
6622 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6623 tmp2 = tcg_const_i32(0);
aa47cfdd 6624 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6625 tcg_temp_free_i32(tmp2);
aa47cfdd 6626 tcg_temp_free_ptr(fpstatus);
0e326109 6627 break;
aa47cfdd 6628 }
600b828c 6629 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6630 {
6631 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6632 tmp2 = tcg_const_i32(0);
aa47cfdd 6633 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6634 tcg_temp_free_i32(tmp2);
aa47cfdd 6635 tcg_temp_free_ptr(fpstatus);
0e326109 6636 break;
aa47cfdd 6637 }
600b828c 6638 case NEON_2RM_VABS_F:
fd8a68cd 6639 gen_helper_vfp_abss(tmp, tmp);
9ee6e8bb 6640 break;
600b828c 6641 case NEON_2RM_VNEG_F:
cedcc96f 6642 gen_helper_vfp_negs(tmp, tmp);
9ee6e8bb 6643 break;
600b828c 6644 case NEON_2RM_VSWP:
dd8fbd78
FN
6645 tmp2 = neon_load_reg(rd, pass);
6646 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6647 break;
600b828c 6648 case NEON_2RM_VTRN:
dd8fbd78 6649 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6650 switch (size) {
dd8fbd78
FN
6651 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6652 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6653 default: abort();
9ee6e8bb 6654 }
dd8fbd78 6655 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6656 break;
34f7b0a2
WN
6657 case NEON_2RM_VRINTN:
6658 case NEON_2RM_VRINTA:
6659 case NEON_2RM_VRINTM:
6660 case NEON_2RM_VRINTP:
6661 case NEON_2RM_VRINTZ:
6662 {
6663 TCGv_i32 tcg_rmode;
6664 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6665 int rmode;
6666
6667 if (op == NEON_2RM_VRINTZ) {
6668 rmode = FPROUNDING_ZERO;
6669 } else {
6670 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6671 }
6672
6673 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6674 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6675 cpu_env);
3b52ad1f 6676 gen_helper_rints(tmp, tmp, fpstatus);
34f7b0a2
WN
6677 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6678 cpu_env);
6679 tcg_temp_free_ptr(fpstatus);
6680 tcg_temp_free_i32(tcg_rmode);
6681 break;
6682 }
2ce70625
WN
6683 case NEON_2RM_VRINTX:
6684 {
6685 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
3b52ad1f 6686 gen_helper_rints_exact(tmp, tmp, fpstatus);
2ce70625
WN
6687 tcg_temp_free_ptr(fpstatus);
6688 break;
6689 }
901ad525
WN
6690 case NEON_2RM_VCVTAU:
6691 case NEON_2RM_VCVTAS:
6692 case NEON_2RM_VCVTNU:
6693 case NEON_2RM_VCVTNS:
6694 case NEON_2RM_VCVTPU:
6695 case NEON_2RM_VCVTPS:
6696 case NEON_2RM_VCVTMU:
6697 case NEON_2RM_VCVTMS:
6698 {
6699 bool is_signed = !extract32(insn, 7, 1);
6700 TCGv_ptr fpst = get_fpstatus_ptr(1);
6701 TCGv_i32 tcg_rmode, tcg_shift;
6702 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6703
6704 tcg_shift = tcg_const_i32(0);
6705 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6706 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6707 cpu_env);
6708
6709 if (is_signed) {
30bf0a01 6710 gen_helper_vfp_tosls(tmp, tmp,
901ad525
WN
6711 tcg_shift, fpst);
6712 } else {
30bf0a01 6713 gen_helper_vfp_touls(tmp, tmp,
901ad525
WN
6714 tcg_shift, fpst);
6715 }
6716
6717 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6718 cpu_env);
6719 tcg_temp_free_i32(tcg_rmode);
6720 tcg_temp_free_i32(tcg_shift);
6721 tcg_temp_free_ptr(fpst);
6722 break;
6723 }
600b828c 6724 case NEON_2RM_VRECPE:
b6d4443a
AB
6725 {
6726 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6727 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6728 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6729 break;
b6d4443a 6730 }
600b828c 6731 case NEON_2RM_VRSQRTE:
c2fb418e
AB
6732 {
6733 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6734 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6735 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6736 break;
c2fb418e 6737 }
600b828c 6738 case NEON_2RM_VRECPE_F:
b6d4443a
AB
6739 {
6740 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9a011fec 6741 gen_helper_recpe_f32(tmp, tmp, fpstatus);
b6d4443a 6742 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6743 break;
b6d4443a 6744 }
600b828c 6745 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
6746 {
6747 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9a011fec 6748 gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
c2fb418e 6749 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6750 break;
c2fb418e 6751 }
600b828c 6752 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
60737ed5
PM
6753 {
6754 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6755 gen_helper_vfp_sitos(tmp, tmp, fpstatus);
6756 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6757 break;
60737ed5 6758 }
600b828c 6759 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
60737ed5
PM
6760 {
6761 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6762 gen_helper_vfp_uitos(tmp, tmp, fpstatus);
6763 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6764 break;
60737ed5 6765 }
600b828c 6766 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
60737ed5
PM
6767 {
6768 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6769 gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
6770 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6771 break;
60737ed5 6772 }
600b828c 6773 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
60737ed5
PM
6774 {
6775 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6776 gen_helper_vfp_touizs(tmp, tmp, fpstatus);
6777 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6778 break;
60737ed5 6779 }
9ee6e8bb 6780 default:
600b828c
PM
6781 /* Reserved op values were caught by the
6782 * neon_2rm_sizes[] check earlier.
6783 */
6784 abort();
9ee6e8bb 6785 }
60737ed5 6786 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6787 }
6788 break;
6789 }
6790 } else if ((insn & (1 << 10)) == 0) {
6791 /* VTBL, VTBX. */
56907d77
PM
6792 int n = ((insn >> 8) & 3) + 1;
6793 if ((rn + n) > 32) {
6794 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6795 * helper function running off the end of the register file.
6796 */
6797 return 1;
6798 }
6799 n <<= 3;
9ee6e8bb 6800 if (insn & (1 << 6)) {
8f8e3aa4 6801 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6802 } else {
7d1b0095 6803 tmp = tcg_temp_new_i32();
8f8e3aa4 6804 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6805 }
8f8e3aa4 6806 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 6807 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 6808 tmp5 = tcg_const_i32(n);
e7c06c4e 6809 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 6810 tcg_temp_free_i32(tmp);
9ee6e8bb 6811 if (insn & (1 << 6)) {
8f8e3aa4 6812 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6813 } else {
7d1b0095 6814 tmp = tcg_temp_new_i32();
8f8e3aa4 6815 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6816 }
8f8e3aa4 6817 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 6818 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 6819 tcg_temp_free_i32(tmp5);
e7c06c4e 6820 tcg_temp_free_ptr(ptr1);
8f8e3aa4 6821 neon_store_reg(rd, 0, tmp2);
3018f259 6822 neon_store_reg(rd, 1, tmp3);
7d1b0095 6823 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6824 } else if ((insn & 0x380) == 0) {
6825 /* VDUP */
32f91fb7 6826 int element;
14776ab5 6827 MemOp size;
32f91fb7 6828
133da6aa
JR
6829 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6830 return 1;
6831 }
9ee6e8bb 6832 if (insn & (1 << 16)) {
32f91fb7
RH
6833 size = MO_8;
6834 element = (insn >> 17) & 7;
9ee6e8bb 6835 } else if (insn & (1 << 17)) {
32f91fb7
RH
6836 size = MO_16;
6837 element = (insn >> 18) & 3;
6838 } else {
6839 size = MO_32;
6840 element = (insn >> 19) & 1;
9ee6e8bb 6841 }
32f91fb7
RH
6842 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
6843 neon_element_offset(rm, element, size),
6844 q ? 16 : 8, q ? 16 : 8);
9ee6e8bb
PB
6845 } else {
6846 return 1;
6847 }
6848 }
6849 }
6850 return 0;
6851}
6852
8b7209fa
RH
6853/* Advanced SIMD three registers of the same length extension.
6854 * 31 25 23 22 20 16 12 11 10 9 8 3 0
6855 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
6856 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
6857 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
6858 */
6859static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
6860{
26c470a7
RH
6861 gen_helper_gvec_3 *fn_gvec = NULL;
6862 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
6863 int rd, rn, rm, opr_sz;
6864 int data = 0;
87732318
RH
6865 int off_rn, off_rm;
6866 bool is_long = false, q = extract32(insn, 6, 1);
6867 bool ptr_is_env = false;
8b7209fa
RH
6868
6869 if ((insn & 0xfe200f10) == 0xfc200800) {
6870 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
6871 int size = extract32(insn, 20, 1);
6872 data = extract32(insn, 23, 2); /* rot */
962fcbf2 6873 if (!dc_isar_feature(aa32_vcma, s)
5763190f 6874 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
6875 return 1;
6876 }
6877 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
6878 } else if ((insn & 0xfea00f10) == 0xfc800800) {
6879 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
6880 int size = extract32(insn, 20, 1);
6881 data = extract32(insn, 24, 1); /* rot */
962fcbf2 6882 if (!dc_isar_feature(aa32_vcma, s)
5763190f 6883 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
6884 return 1;
6885 }
6886 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
6887 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
6888 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
6889 bool u = extract32(insn, 4, 1);
962fcbf2 6890 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
6891 return 1;
6892 }
6893 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
87732318
RH
6894 } else if ((insn & 0xff300f10) == 0xfc200810) {
6895 /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
6896 int is_s = extract32(insn, 23, 1);
6897 if (!dc_isar_feature(aa32_fhm, s)) {
6898 return 1;
6899 }
6900 is_long = true;
6901 data = is_s; /* is_2 == 0 */
6902 fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
6903 ptr_is_env = true;
8b7209fa
RH
6904 } else {
6905 return 1;
6906 }
6907
87732318
RH
6908 VFP_DREG_D(rd, insn);
6909 if (rd & q) {
6910 return 1;
6911 }
6912 if (q || !is_long) {
6913 VFP_DREG_N(rn, insn);
6914 VFP_DREG_M(rm, insn);
6915 if ((rn | rm) & q & !is_long) {
6916 return 1;
6917 }
6918 off_rn = vfp_reg_offset(1, rn);
6919 off_rm = vfp_reg_offset(1, rm);
6920 } else {
6921 rn = VFP_SREG_N(insn);
6922 rm = VFP_SREG_M(insn);
6923 off_rn = vfp_reg_offset(0, rn);
6924 off_rm = vfp_reg_offset(0, rm);
6925 }
6926
8b7209fa 6927 if (s->fp_excp_el) {
a767fac8 6928 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
4be42f40 6929 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8b7209fa
RH
6930 return 0;
6931 }
6932 if (!s->vfp_enabled) {
6933 return 1;
6934 }
6935
6936 opr_sz = (1 + q) * 8;
26c470a7 6937 if (fn_gvec_ptr) {
87732318
RH
6938 TCGv_ptr ptr;
6939 if (ptr_is_env) {
6940 ptr = cpu_env;
6941 } else {
6942 ptr = get_fpstatus_ptr(1);
6943 }
6944 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 6945 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
6946 if (!ptr_is_env) {
6947 tcg_temp_free_ptr(ptr);
6948 }
26c470a7 6949 } else {
87732318 6950 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
6951 opr_sz, opr_sz, data, fn_gvec);
6952 }
8b7209fa
RH
6953 return 0;
6954}
6955
/* Advanced SIMD two registers and a scalar extension.
 *  31             24   23  22   20   16   12  11   10   9    8        3    0
 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
 *
 */

/*
 * Decode and emit code for the Advanced SIMD "two registers and a scalar
 * extension" group (VCMLA indexed, V[US]DOT indexed, VFM[AS]L indexed).
 *
 * Returns 0 if the instruction was handled, 1 to signal UNDEF.
 */
static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
{
    gen_helper_gvec_3 *fn_gvec = NULL;
    gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
    int rd, rn, rm, opr_sz, data;
    int off_rn, off_rm;
    bool is_long = false, q = extract32(insn, 6, 1);
    bool ptr_is_env = false;

    if ((insn & 0xff000f10) == 0xfe000800) {
        /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
        int rot = extract32(insn, 20, 2);
        int size = extract32(insn, 23, 1);
        int index;

        if (!dc_isar_feature(aa32_vcma, s)) {
            return 1;
        }
        if (size == 0) {
            if (!dc_isar_feature(aa32_fp16_arith, s)) {
                return 1;
            }
            /* For fp16, rm is just Vm, and index is M. */
            rm = extract32(insn, 0, 4);
            index = extract32(insn, 5, 1);
        } else {
            /* For fp32, rm is the usual M:Vm, and index is 0. */
            VFP_DREG_M(rm, insn);
            index = 0;
        }
        /* Pack index and rotation into the helper's immediate operand. */
        data = (index << 2) | rot;
        fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
                       : gen_helper_gvec_fcmlah_idx);
    } else if ((insn & 0xffb00f00) == 0xfe200d00) {
        /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
        int u = extract32(insn, 4, 1);

        if (!dc_isar_feature(aa32_dp, s)) {
            return 1;
        }
        fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
        /* rm is just Vm, and index is M. */
        data = extract32(insn, 5, 1); /* index */
        rm = extract32(insn, 0, 4);
    } else if ((insn & 0xffa00f10) == 0xfe000810) {
        /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
        int is_s = extract32(insn, 20, 1);
        int vm20 = extract32(insn, 0, 3);
        int vm3 = extract32(insn, 3, 1);
        int m = extract32(insn, 5, 1);
        int index;

        if (!dc_isar_feature(aa32_fhm, s)) {
            return 1;
        }
        /* The scalar operand is a half-precision element, so the index
         * and register split depend on Q (double vs quad destination).
         */
        if (q) {
            rm = vm20;
            index = m * 2 + vm3;
        } else {
            rm = vm20 * 2 + m;
            index = vm3;
        }
        is_long = true;
        data = (index << 2) | is_s; /* is_2 == 0 */
        fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
        ptr_is_env = true;
    } else {
        return 1;
    }

    VFP_DREG_D(rd, insn);
    /* Quad destinations must use an even register number. */
    if (rd & q) {
        return 1;
    }
    if (q || !is_long) {
        /* Source operands are D registers. */
        VFP_DREG_N(rn, insn);
        if (rn & q & !is_long) {
            return 1;
        }
        off_rn = vfp_reg_offset(1, rn);
        off_rm = vfp_reg_offset(1, rm);
    } else {
        /* Long operation with Q == 0: sources are S registers. */
        rn = VFP_SREG_N(insn);
        off_rn = vfp_reg_offset(0, rn);
        off_rm = vfp_reg_offset(0, rm);
    }
    /* FP access checks come after full decode, as architecturally required. */
    if (s->fp_excp_el) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
    if (!s->vfp_enabled) {
        return 1;
    }

    opr_sz = (1 + q) * 8;
    if (fn_gvec_ptr) {
        /* Helper needs a pointer argument: either cpu_env or fpstatus. */
        TCGv_ptr ptr;
        if (ptr_is_env) {
            ptr = cpu_env;
        } else {
            ptr = get_fpstatus_ptr(1);
        }
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
                           opr_sz, opr_sz, data, fn_gvec_ptr);
        if (!ptr_is_env) {
            tcg_temp_free_ptr(ptr);
        }
    } else {
        tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
                           opr_sz, opr_sz, data, fn_gvec);
    }
    return 0;
}
7078
/*
 * Decode and emit code for a coprocessor instruction (MRC/MCR/MRRC/MCRR
 * and the XScale/iwMMXt coprocessor spaces).
 *
 * Returns 0 if the access was handled (including handled-as-trap cases),
 * 1 to signal UNDEF.
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* c15_cpar gates access to cp0/cp1 on XScale. */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the register-access fields; 64-bit (MRRC/MCRR) accesses
     * have no crn/opc2 and carry the second transfer register in rt2.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc_curr);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        } else if (ri->type & ARM_CP_RAISES_EXC) {
            /*
             * The readfn or writefn might raise an exception;
             * synchronize the CPU state in case it does.
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc_curr);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->base.pc_next);
            s->base.is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* I/O registers under icount must be bracketed by gen_io_start()
         * and the TB-ending code below.
         */
        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit result into rt (low) and rt2 (high). */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrh_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7335
5e3f878a
PB
7336
7337/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7338static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7339{
39d5492a 7340 TCGv_i32 tmp;
7d1b0095 7341 tmp = tcg_temp_new_i32();
ecc7b3aa 7342 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7343 store_reg(s, rlow, tmp);
7d1b0095 7344 tmp = tcg_temp_new_i32();
664b7e3b 7345 tcg_gen_extrh_i64_i32(tmp, val);
5e3f878a
PB
7346 store_reg(s, rhigh, tmp);
7347}
7348
7349/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7350static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7351{
a7812ae4 7352 TCGv_i64 tmp;
39d5492a 7353 TCGv_i32 tmp2;
5e3f878a 7354
36aa55dc 7355 /* Load value and extend to 64 bits. */
a7812ae4 7356 tmp = tcg_temp_new_i64();
5e3f878a
PB
7357 tmp2 = load_reg(s, rlow);
7358 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7359 tcg_temp_free_i32(tmp2);
5e3f878a 7360 tcg_gen_add_i64(val, val, tmp);
b75263d6 7361 tcg_temp_free_i64(tmp);
5e3f878a
PB
7362}
7363
7364/* load and add a 64-bit value from a register pair. */
a7812ae4 7365static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7366{
a7812ae4 7367 TCGv_i64 tmp;
39d5492a
PM
7368 TCGv_i32 tmpl;
7369 TCGv_i32 tmph;
5e3f878a
PB
7370
7371 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7372 tmpl = load_reg(s, rlow);
7373 tmph = load_reg(s, rhigh);
a7812ae4 7374 tmp = tcg_temp_new_i64();
36aa55dc 7375 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7376 tcg_temp_free_i32(tmpl);
7377 tcg_temp_free_i32(tmph);
5e3f878a 7378 tcg_gen_add_i64(val, val, tmp);
b75263d6 7379 tcg_temp_free_i64(tmp);
5e3f878a
PB
7380}
7381
c9f10124 7382/* Set N and Z flags from hi|lo. */
39d5492a 7383static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7384{
c9f10124
RH
7385 tcg_gen_mov_i32(cpu_NF, hi);
7386 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7387}
7388
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.  The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually.  */

/*
 * Emit code for LDREX{B,H,,D}: load from @addr into @rt (and @rt2 for
 * the doubleword case, size == 3) and record the address and loaded
 * value in cpu_exclusive_addr/cpu_exclusive_val for the matching STREX.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    MemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. That means we don't want to do a
         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
         * for an architecturally 64-bit access, but instead do a
         * 64-bit access using MO_BE if appropriate and then split
         * the two halves.
         * This only makes a difference for BE32 user-mode, where
         * frob64() must not flip the two halves of the 64-bit data
         * but this code must treat BE32 user-mode like BE32 system.
         */
        TCGv taddr = gen_aa32_addr(s, addr, opc);

        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
        tcg_temp_free(taddr);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        if (s->be_data == MO_BE) {
            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
        } else {
            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        }
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        /* Remember the loaded value for the STREX comparison. */
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Arm the exclusive monitor with the (zero-extended) address. */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7439
/* Emit code for CLREX: invalidate the exclusive monitor.  Writing -1 to
 * cpu_exclusive_addr guarantees no subsequent STREX address can match.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7444
/*
 * Emit code for STREX{B,H,,D}: conditionally store @rt (and @rt2 for
 * size == 3) to @addr if the exclusive monitor is still armed for that
 * address and the memory still holds the value loaded by LDREX.
 * @rd receives 0 on success, 1 on failure.  The address comparison is
 * done inline; the value comparison is folded into a cmpxchg.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    MemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Fail immediately if the monitor is armed for a different address. */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. Since we're going to treat this as a
         * single 64-bit BE store, we need to put the two halves in the
         * opposite order for BE to LE, so that they end up in the right
         * places.
         * We don't want gen_aa32_frob64() because that does the wrong
         * thing for BE32 usermode.
         */
        if (s->be_data == MO_BE) {
            tcg_gen_concat_i32_i64(n64, t2, t1);
        } else {
            tcg_gen_concat_i32_i64(n64, t1, t2);
        }
        tcg_temp_free_i32(t2);

        /* Atomically store n64 iff memory still holds cpu_exclusive_val;
         * o64 receives the value that was actually in memory.
         */
        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        /* t0 = 1 if the value comparison failed, else 0. */
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Success or failure, the monitor is disarmed afterwards. */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
426f5abc 7518
/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    /* Validate the target mode number. */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        unallocated_encoding(s);
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset of the first word stored, relative to the banked SP. */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR, then SPSR at addr+4. */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Adjust from "address of second word stored" back to the
         * final SP value for this addressing mode.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    s->base.is_jmp = DISAS_UPDATE;
}
7644
c2d9644e
RK
7645/* Generate a label used for skipping this instruction */
7646static void arm_gen_condlabel(DisasContext *s)
7647{
7648 if (!s->condjmp) {
7649 s->condlabel = gen_new_label();
7650 s->condjmp = 1;
7651 }
7652}
7653
/* Skip this instruction if the ARM condition is false */
static void arm_skip_unless(DisasContext *s, uint32_t cond)
{
    /* Branch to the skip label when the inverted condition holds. */
    arm_gen_condlabel(s);
    arm_gen_test_cc(cond ^ 1, s->condlabel);
}
7660
581c6ebd
RH
7661
7662/*
7663 * Constant expanders for the decoders.
7664 */
7665
7666static int times_2(DisasContext *s, int x)
7667{
7668 return x * 2;
7669}
7670
7671/* Return only the rotation part of T32ExpandImm. */
7672static int t32_expandimm_rot(DisasContext *s, int x)
7673{
7674 return x & 0xc00 ? extract32(x, 7, 5) : 0;
7675}
7676
7677/* Return the unrotated immediate from T32ExpandImm. */
7678static int t32_expandimm_imm(DisasContext *s, int x)
7679{
7680 int imm = extract32(x, 0, 8);
7681
7682 switch (extract32(x, 8, 4)) {
7683 case 0: /* XY */
7684 /* Nothing to do. */
7685 break;
7686 case 1: /* 00XY00XY */
7687 imm *= 0x00010001;
7688 break;
7689 case 2: /* XY00XY00 */
7690 imm *= 0x01000100;
7691 break;
7692 case 3: /* XYXYXYXY */
7693 imm *= 0x01010101;
7694 break;
7695 default:
7696 /* Rotated constant. */
7697 imm |= 0x80;
7698 break;
7699 }
7700 return imm;
7701}
7702
51409b9e
RH
7703/*
7704 * Include the generated decoders.
7705 */
7706
7707#include "decode-a32.inc.c"
7708#include "decode-a32-uncond.inc.c"
7709#include "decode-t32.inc.c"
7710
/* Helpers to swap operands for reverse-subtract. */

/* dst = b - a, flags untouched. */
static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_sub_i32(dst, b, a);
}
7716
/* dst = b - a, setting NZCV. */
static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
{
    gen_sub_CC(dst, b, a);
}
7721
/* dest = b - a - !carry, flags untouched. */
static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
{
    gen_sub_carry(dest, b, a);
}
7726
/* dest = b - a - !carry, setting NZCV. */
static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
{
    gen_sbc_CC(dest, b, a);
}
7731
/*
 * Helpers for the data processing routines.
 *
 * After the computation store the results back.
 * This may be suppressed altogether (STREG_NONE), require a runtime
 * check against the stack limits (STREG_SP_CHECK), or generate an
 * exception return. Oh, or store into a register.
 *
 * Always return true, indicating success for a trans_* function.
 */
typedef enum {
    STREG_NONE,      /* discard the result (compare-style ops) */
    STREG_NORMAL,    /* plain store to Rd */
    STREG_SP_CHECK,  /* store to SP with stack-limit check */
    STREG_EXC_RET,   /* exception return via the result value */
} StoreRegKind;
7748
7749static bool store_reg_kind(DisasContext *s, int rd,
7750 TCGv_i32 val, StoreRegKind kind)
7751{
7752 switch (kind) {
7753 case STREG_NONE:
7754 tcg_temp_free_i32(val);
7755 return true;
7756 case STREG_NORMAL:
7757 /* See ALUWritePC: Interworking only from a32 mode. */
7758 if (s->thumb) {
7759 store_reg(s, rd, val);
7760 } else {
7761 store_reg_bx(s, rd, val);
7762 }
7763 return true;
7764 case STREG_SP_CHECK:
7765 store_sp_checked(s, val);
7766 return true;
7767 case STREG_EXC_RET:
7768 gen_exception_return(s, val);
7769 return true;
7770 }
7771 g_assert_not_reached();
7772}
7773
7774/*
7775 * Data Processing (register)
7776 *
7777 * Operate, with set flags, one register source,
7778 * one immediate shifted register source, and a destination.
7779 */
7780static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
7781 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
7782 int logic_cc, StoreRegKind kind)
7783{
7784 TCGv_i32 tmp1, tmp2;
7785
7786 tmp2 = load_reg(s, a->rm);
7787 gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
7788 tmp1 = load_reg(s, a->rn);
7789
7790 gen(tmp1, tmp1, tmp2);
7791 tcg_temp_free_i32(tmp2);
7792
7793 if (logic_cc) {
7794 gen_logic_CC(tmp1);
7795 }
7796 return store_reg_kind(s, a->rd, tmp1, kind);
7797}
7798
7799static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a,
7800 void (*gen)(TCGv_i32, TCGv_i32),
7801 int logic_cc, StoreRegKind kind)
7802{
7803 TCGv_i32 tmp;
7804
7805 tmp = load_reg(s, a->rm);
7806 gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc);
7807
7808 gen(tmp, tmp);
7809 if (logic_cc) {
7810 gen_logic_CC(tmp);
7811 }
7812 return store_reg_kind(s, a->rd, tmp, kind);
7813}
7814
5be2c123
RH
7815/*
7816 * Data-processing (register-shifted register)
7817 *
7818 * Operate, with set flags, one register source,
7819 * one register shifted register source, and a destination.
7820 */
7821static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
7822 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
7823 int logic_cc, StoreRegKind kind)
7824{
7825 TCGv_i32 tmp1, tmp2;
7826
7827 tmp1 = load_reg(s, a->rs);
7828 tmp2 = load_reg(s, a->rm);
7829 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
7830 tmp1 = load_reg(s, a->rn);
7831
7832 gen(tmp1, tmp1, tmp2);
7833 tcg_temp_free_i32(tmp2);
7834
7835 if (logic_cc) {
7836 gen_logic_CC(tmp1);
7837 }
7838 return store_reg_kind(s, a->rd, tmp1, kind);
7839}
7840
7841static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a,
7842 void (*gen)(TCGv_i32, TCGv_i32),
7843 int logic_cc, StoreRegKind kind)
7844{
7845 TCGv_i32 tmp1, tmp2;
7846
7847 tmp1 = load_reg(s, a->rs);
7848 tmp2 = load_reg(s, a->rm);
7849 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
7850
7851 gen(tmp2, tmp2);
7852 if (logic_cc) {
7853 gen_logic_CC(tmp2);
7854 }
7855 return store_reg_kind(s, a->rd, tmp2, kind);
7856}
7857
581c6ebd
RH
7858/*
7859 * Data-processing (immediate)
7860 *
7861 * Operate, with set flags, one register source,
7862 * one rotated immediate, and a destination.
7863 *
7864 * Note that logic_cc && a->rot setting CF based on the msb of the
7865 * immediate is the reason why we must pass in the unrotated form
7866 * of the immediate.
7867 */
7868static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a,
7869 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
7870 int logic_cc, StoreRegKind kind)
7871{
7872 TCGv_i32 tmp1, tmp2;
7873 uint32_t imm;
7874
7875 imm = ror32(a->imm, a->rot);
7876 if (logic_cc && a->rot) {
7877 tcg_gen_movi_i32(cpu_CF, imm >> 31);
7878 }
7879 tmp2 = tcg_const_i32(imm);
7880 tmp1 = load_reg(s, a->rn);
7881
7882 gen(tmp1, tmp1, tmp2);
7883 tcg_temp_free_i32(tmp2);
7884
7885 if (logic_cc) {
7886 gen_logic_CC(tmp1);
7887 }
7888 return store_reg_kind(s, a->rd, tmp1, kind);
7889}
7890
7891static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a,
7892 void (*gen)(TCGv_i32, TCGv_i32),
7893 int logic_cc, StoreRegKind kind)
7894{
7895 TCGv_i32 tmp;
7896 uint32_t imm;
7897
7898 imm = ror32(a->imm, a->rot);
7899 if (logic_cc && a->rot) {
7900 tcg_gen_movi_i32(cpu_CF, imm >> 31);
7901 }
7902 tmp = tcg_const_i32(imm);
7903
7904 gen(tmp, tmp);
7905 if (logic_cc) {
7906 gen_logic_CC(tmp);
7907 }
7908 return store_reg_kind(s, a->rd, tmp, kind);
7909}
7910
/* Expand the three trans_* entry points (immediate-shifted register,
 * register-shifted register, rotated immediate) for a two-source op.
 * K may be a GCC statement expression that inspects 'a' and 's'.
 */
#define DO_ANY3(NAME, OP, L, K)                                         \
    static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \
    { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \
    { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a)  \
    { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }

/* As DO_ANY3, but for single-source ops (MOV/MVN). */
#define DO_ANY2(NAME, OP, L, K)                                         \
    static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \
    { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \
    { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a)  \
    { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }

/* As DO_ANY3, but the result is always discarded (TST/TEQ/CMN/CMP). */
#define DO_CMP2(NAME, OP, L)                                            \
    static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a) \
    { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); }                   \
    static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a) \
    { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); }                   \
    static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a)  \
    { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }

/* Logical ops: flags (if S) come from gen_logic_CC plus shifter carry. */
DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)

/* Arithmetic ops: the *_CC variant computes NZCV itself, so the
 * logic_cc argument is always false.
 */
DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)

DO_CMP2(TST, tcg_gen_and_i32, true)
DO_CMP2(TEQ, tcg_gen_xor_i32, true)
DO_CMP2(CMN, gen_add_CC, false)
DO_CMP2(CMP, gen_sub_CC, false)

DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
        a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)

/*
 * Note for the computation of StoreRegKind we return out of the
 * middle of the functions that are expanded by DO_ANY3, and that
 * we modify a->s via that parameter before it is used by OP.
 */
DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false,
        ({
            StoreRegKind ret = STREG_NORMAL;
            if (a->rd == 15 && a->s) {
                /*
                 * See ALUExceptionReturn:
                 * In User mode, UNPREDICTABLE; we choose UNDEF.
                 * In Hyp mode, UNDEFINED.
                 */
                if (IS_USER(s) || s->current_el == 2) {
                    unallocated_encoding(s);
                    return true;
                }
                /* There is no writeback of nzcv to PSTATE.  */
                a->s = 0;
                ret = STREG_EXC_RET;
            } else if (a->rd == 13 && a->rn == 13) {
                ret = STREG_SP_CHECK;
            }
            ret;
        }))

DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
        ({
            StoreRegKind ret = STREG_NORMAL;
            if (a->rd == 15 && a->s) {
                /*
                 * See ALUExceptionReturn:
                 * In User mode, UNPREDICTABLE; we choose UNDEF.
                 * In Hyp mode, UNDEFINED.
                 */
                if (IS_USER(s) || s->current_el == 2) {
                    unallocated_encoding(s);
                    return true;
                }
                /* There is no writeback of nzcv to PSTATE.  */
                a->s = 0;
                ret = STREG_EXC_RET;
            } else if (a->rd == 13) {
                ret = STREG_SP_CHECK;
            }
            ret;
        }))

DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
8003
8004/*
8005 * ORN is only available with T32, so there is no register-shifted-register
8006 * form of the insn. Using the DO_ANY3 macro would create an unused function.
8007 */
8008static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
8009{
8010 return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
8011}
8012
581c6ebd
RH
8013static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)
8014{
8015 return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
8016}
8017
25ae32c5
RH
8018#undef DO_ANY3
8019#undef DO_ANY2
8020#undef DO_CMP2
8021
51409b9e
RH
8022/*
8023 * Legacy decoder.
8024 */
8025
f4df2210 8026static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8027{
f4df2210 8028 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8029 TCGv_i32 tmp;
8030 TCGv_i32 tmp2;
8031 TCGv_i32 tmp3;
8032 TCGv_i32 addr;
a7812ae4 8033 TCGv_i64 tmp64;
9ee6e8bb 8034
e13886e3
PM
8035 /* M variants do not implement ARM mode; this must raise the INVSTATE
8036 * UsageFault exception.
8037 */
b53d8923 8038 if (arm_dc_feature(s, ARM_FEATURE_M)) {
a767fac8 8039 gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
e13886e3
PM
8040 default_exception_el(s));
8041 return;
b53d8923 8042 }
9ee6e8bb 8043 cond = insn >> 28;
51409b9e
RH
8044
8045 if (cond == 0xf) {
be5e7a76
DES
8046 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8047 * choose to UNDEF. In ARMv5 and above the space is used
8048 * for miscellaneous unconditional instructions.
8049 */
8050 ARCH(5);
8051
9ee6e8bb 8052 /* Unconditional instructions. */
51409b9e
RH
8053 if (disas_a32_uncond(s, insn)) {
8054 return;
8055 }
8056 /* fall back to legacy decoder */
8057
9ee6e8bb
PB
8058 if (((insn >> 25) & 7) == 1) {
8059 /* NEON Data processing. */
d614a513 8060 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8061 goto illegal_op;
d614a513 8062 }
9ee6e8bb 8063
7dcc1f89 8064 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8065 goto illegal_op;
7dcc1f89 8066 }
9ee6e8bb
PB
8067 return;
8068 }
8069 if ((insn & 0x0f100000) == 0x04000000) {
8070 /* NEON load/store. */
d614a513 8071 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8072 goto illegal_op;
d614a513 8073 }
9ee6e8bb 8074
7dcc1f89 8075 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8076 goto illegal_op;
7dcc1f89 8077 }
9ee6e8bb
PB
8078 return;
8079 }
6a57f3eb
WN
8080 if ((insn & 0x0f000e10) == 0x0e000a00) {
8081 /* VFP. */
7dcc1f89 8082 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8083 goto illegal_op;
8084 }
8085 return;
8086 }
3d185e5d
PM
8087 if (((insn & 0x0f30f000) == 0x0510f000) ||
8088 ((insn & 0x0f30f010) == 0x0710f000)) {
8089 if ((insn & (1 << 22)) == 0) {
8090 /* PLDW; v7MP */
d614a513 8091 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8092 goto illegal_op;
8093 }
8094 }
8095 /* Otherwise PLD; v5TE+ */
be5e7a76 8096 ARCH(5TE);
3d185e5d
PM
8097 return;
8098 }
8099 if (((insn & 0x0f70f000) == 0x0450f000) ||
8100 ((insn & 0x0f70f010) == 0x0650f000)) {
8101 ARCH(7);
8102 return; /* PLI; V7 */
8103 }
8104 if (((insn & 0x0f700000) == 0x04100000) ||
8105 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8106 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8107 goto illegal_op;
8108 }
8109 return; /* v7MP: Unallocated memory hint: must NOP */
8110 }
8111
8112 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8113 ARCH(6);
8114 /* setend */
9886ecdf
PB
8115 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8116 gen_helper_setend(cpu_env);
dcba3a8d 8117 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8118 }
8119 return;
8120 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8121 switch ((insn >> 4) & 0xf) {
8122 case 1: /* clrex */
8123 ARCH(6K);
426f5abc 8124 gen_clrex(s);
9ee6e8bb
PB
8125 return;
8126 case 4: /* dsb */
8127 case 5: /* dmb */
9ee6e8bb 8128 ARCH(7);
61e4c432 8129 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8130 return;
6df99dec
SS
8131 case 6: /* isb */
8132 /* We need to break the TB after this insn to execute
8133 * self-modifying code correctly and also to take
8134 * any pending interrupts immediately.
8135 */
a0415916 8136 gen_goto_tb(s, 0, s->base.pc_next);
6df99dec 8137 return;
9888bd1e
RH
8138 case 7: /* sb */
8139 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
8140 goto illegal_op;
8141 }
8142 /*
8143 * TODO: There is no speculation barrier opcode
8144 * for TCG; MB and end the TB instead.
8145 */
8146 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
a0415916 8147 gen_goto_tb(s, 0, s->base.pc_next);
9888bd1e 8148 return;
9ee6e8bb
PB
8149 default:
8150 goto illegal_op;
8151 }
8152 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8153 /* srs */
81465888
PM
8154 ARCH(6);
8155 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8156 return;
ea825eee 8157 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8158 /* rfe */
c67b6b71 8159 int32_t offset;
9ee6e8bb
PB
8160 if (IS_USER(s))
8161 goto illegal_op;
8162 ARCH(6);
8163 rn = (insn >> 16) & 0xf;
b0109805 8164 addr = load_reg(s, rn);
9ee6e8bb
PB
8165 i = (insn >> 23) & 3;
8166 switch (i) {
b0109805 8167 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8168 case 1: offset = 0; break; /* IA */
8169 case 2: offset = -8; break; /* DB */
b0109805 8170 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8171 default: abort();
8172 }
8173 if (offset)
b0109805
PB
8174 tcg_gen_addi_i32(addr, addr, offset);
8175 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8176 tmp = tcg_temp_new_i32();
12dcc321 8177 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8178 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8179 tmp2 = tcg_temp_new_i32();
12dcc321 8180 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8181 if (insn & (1 << 21)) {
8182 /* Base writeback. */
8183 switch (i) {
b0109805 8184 case 0: offset = -8; break;
c67b6b71
FN
8185 case 1: offset = 4; break;
8186 case 2: offset = -4; break;
b0109805 8187 case 3: offset = 0; break;
9ee6e8bb
PB
8188 default: abort();
8189 }
8190 if (offset)
b0109805
PB
8191 tcg_gen_addi_i32(addr, addr, offset);
8192 store_reg(s, rn, addr);
8193 } else {
7d1b0095 8194 tcg_temp_free_i32(addr);
9ee6e8bb 8195 }
b0109805 8196 gen_rfe(s, tmp, tmp2);
c67b6b71 8197 return;
9ee6e8bb
PB
8198 } else if ((insn & 0x0e000000) == 0x0a000000) {
8199 /* branch link and change to thumb (blx <offset>) */
8200 int32_t offset;
8201
7d1b0095 8202 tmp = tcg_temp_new_i32();
a0415916 8203 tcg_gen_movi_i32(tmp, s->base.pc_next);
d9ba4830 8204 store_reg(s, 14, tmp);
9ee6e8bb
PB
8205 /* Sign-extend the 24-bit offset */
8206 offset = (((int32_t)insn) << 8) >> 8;
fdbcf632 8207 val = read_pc(s);
9ee6e8bb
PB
8208 /* offset * 4 + bit24 * 2 + (thumb bit) */
8209 val += (offset << 2) | ((insn >> 23) & 2) | 1;
be5e7a76 8210 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8211 gen_bx_im(s, val);
9ee6e8bb
PB
8212 return;
8213 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8214 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8215 /* iWMMXt register transfer. */
c0f4af17 8216 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8217 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8218 return;
c0f4af17
PM
8219 }
8220 }
9ee6e8bb 8221 }
8b7209fa
RH
8222 } else if ((insn & 0x0e000a00) == 0x0c000800
8223 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8224 if (disas_neon_insn_3same_ext(s, insn)) {
8225 goto illegal_op;
8226 }
8227 return;
638808ff
RH
8228 } else if ((insn & 0x0f000a00) == 0x0e000800
8229 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8230 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
8231 goto illegal_op;
8232 }
8233 return;
9ee6e8bb
PB
8234 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8235 /* Coprocessor double register transfer. */
be5e7a76 8236 ARCH(5TE);
9ee6e8bb
PB
8237 } else if ((insn & 0x0f000010) == 0x0e000010) {
8238 /* Additional coprocessor register transfer. */
7997d92f 8239 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8240 uint32_t mask;
8241 uint32_t val;
8242 /* cps (privileged) */
8243 if (IS_USER(s))
8244 return;
8245 mask = val = 0;
8246 if (insn & (1 << 19)) {
8247 if (insn & (1 << 8))
8248 mask |= CPSR_A;
8249 if (insn & (1 << 7))
8250 mask |= CPSR_I;
8251 if (insn & (1 << 6))
8252 mask |= CPSR_F;
8253 if (insn & (1 << 18))
8254 val |= mask;
8255 }
7997d92f 8256 if (insn & (1 << 17)) {
9ee6e8bb
PB
8257 mask |= CPSR_M;
8258 val |= (insn & 0x1f);
8259 }
8260 if (mask) {
2fbac54b 8261 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8262 }
8263 return;
8264 }
8265 goto illegal_op;
8266 }
8267 if (cond != 0xe) {
8268 /* if not always execute, we generate a conditional jump to
8269 next instruction */
c2d9644e 8270 arm_skip_unless(s, cond);
9ee6e8bb 8271 }
51409b9e
RH
8272
8273 if (disas_a32(s, insn)) {
8274 return;
8275 }
8276 /* fall back to legacy decoder */
8277
9ee6e8bb
PB
8278 if ((insn & 0x0f900000) == 0x03000000) {
8279 if ((insn & (1 << 21)) == 0) {
8280 ARCH(6T2);
8281 rd = (insn >> 12) & 0xf;
8282 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8283 if ((insn & (1 << 22)) == 0) {
8284 /* MOVW */
7d1b0095 8285 tmp = tcg_temp_new_i32();
5e3f878a 8286 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8287 } else {
8288 /* MOVT */
5e3f878a 8289 tmp = load_reg(s, rd);
86831435 8290 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8291 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8292 }
5e3f878a 8293 store_reg(s, rd, tmp);
9ee6e8bb
PB
8294 } else {
8295 if (((insn >> 12) & 0xf) != 0xf)
8296 goto illegal_op;
8297 if (((insn >> 16) & 0xf) == 0) {
8298 gen_nop_hint(s, insn & 0xff);
8299 } else {
8300 /* CPSR = immediate */
8301 val = insn & 0xff;
8302 shift = ((insn >> 8) & 0xf) * 2;
dd861b3f 8303 val = ror32(val, shift);
9ee6e8bb 8304 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8305 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8306 i, val)) {
9ee6e8bb 8307 goto illegal_op;
7dcc1f89 8308 }
9ee6e8bb
PB
8309 }
8310 }
8311 } else if ((insn & 0x0f900000) == 0x01000000
8312 && (insn & 0x00000090) != 0x00000090) {
8313 /* miscellaneous instructions */
8314 op1 = (insn >> 21) & 3;
8315 sh = (insn >> 4) & 0xf;
8316 rm = insn & 0xf;
8317 switch (sh) {
8bfd0550
PM
8318 case 0x0: /* MSR, MRS */
8319 if (insn & (1 << 9)) {
8320 /* MSR (banked) and MRS (banked) */
8321 int sysm = extract32(insn, 16, 4) |
8322 (extract32(insn, 8, 1) << 4);
8323 int r = extract32(insn, 22, 1);
8324
8325 if (op1 & 1) {
8326 /* MSR (banked) */
8327 gen_msr_banked(s, r, sysm, rm);
8328 } else {
8329 /* MRS (banked) */
8330 int rd = extract32(insn, 12, 4);
8331
8332 gen_mrs_banked(s, r, sysm, rd);
8333 }
8334 break;
8335 }
8336
8337 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8338 if (op1 & 1) {
8339 /* PSR = reg */
2fbac54b 8340 tmp = load_reg(s, rm);
9ee6e8bb 8341 i = ((op1 & 2) != 0);
7dcc1f89 8342 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8343 goto illegal_op;
8344 } else {
8345 /* reg = PSR */
8346 rd = (insn >> 12) & 0xf;
8347 if (op1 & 2) {
8348 if (IS_USER(s))
8349 goto illegal_op;
d9ba4830 8350 tmp = load_cpu_field(spsr);
9ee6e8bb 8351 } else {
7d1b0095 8352 tmp = tcg_temp_new_i32();
9ef39277 8353 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8354 }
d9ba4830 8355 store_reg(s, rd, tmp);
9ee6e8bb
PB
8356 }
8357 break;
8358 case 0x1:
8359 if (op1 == 1) {
8360 /* branch/exchange thumb (bx). */
be5e7a76 8361 ARCH(4T);
d9ba4830
PB
8362 tmp = load_reg(s, rm);
8363 gen_bx(s, tmp);
9ee6e8bb
PB
8364 } else if (op1 == 3) {
8365 /* clz */
be5e7a76 8366 ARCH(5);
9ee6e8bb 8367 rd = (insn >> 12) & 0xf;
1497c961 8368 tmp = load_reg(s, rm);
7539a012 8369 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8370 store_reg(s, rd, tmp);
9ee6e8bb
PB
8371 } else {
8372 goto illegal_op;
8373 }
8374 break;
8375 case 0x2:
8376 if (op1 == 1) {
8377 ARCH(5J); /* bxj */
8378 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8379 tmp = load_reg(s, rm);
8380 gen_bx(s, tmp);
9ee6e8bb
PB
8381 } else {
8382 goto illegal_op;
8383 }
8384 break;
8385 case 0x3:
8386 if (op1 != 1)
8387 goto illegal_op;
8388
be5e7a76 8389 ARCH(5);
9ee6e8bb 8390 /* branch link/exchange thumb (blx) */
d9ba4830 8391 tmp = load_reg(s, rm);
7d1b0095 8392 tmp2 = tcg_temp_new_i32();
a0415916 8393 tcg_gen_movi_i32(tmp2, s->base.pc_next);
d9ba4830
PB
8394 store_reg(s, 14, tmp2);
8395 gen_bx(s, tmp);
9ee6e8bb 8396 break;
eb0ecd5a
WN
8397 case 0x4:
8398 {
8399 /* crc32/crc32c */
8400 uint32_t c = extract32(insn, 8, 4);
8401
8402 /* Check this CPU supports ARMv8 CRC instructions.
8403 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8404 * Bits 8, 10 and 11 should be zero.
8405 */
962fcbf2 8406 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
eb0ecd5a
WN
8407 goto illegal_op;
8408 }
8409
8410 rn = extract32(insn, 16, 4);
8411 rd = extract32(insn, 12, 4);
8412
8413 tmp = load_reg(s, rn);
8414 tmp2 = load_reg(s, rm);
aa633469
PM
8415 if (op1 == 0) {
8416 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8417 } else if (op1 == 1) {
8418 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8419 }
eb0ecd5a
WN
8420 tmp3 = tcg_const_i32(1 << op1);
8421 if (c & 0x2) {
8422 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8423 } else {
8424 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8425 }
8426 tcg_temp_free_i32(tmp2);
8427 tcg_temp_free_i32(tmp3);
8428 store_reg(s, rd, tmp);
8429 break;
8430 }
9ee6e8bb 8431 case 0x5: /* saturating add/subtract */
be5e7a76 8432 ARCH(5TE);
9ee6e8bb
PB
8433 rd = (insn >> 12) & 0xf;
8434 rn = (insn >> 16) & 0xf;
b40d0353 8435 tmp = load_reg(s, rm);
5e3f878a 8436 tmp2 = load_reg(s, rn);
9ee6e8bb 8437 if (op1 & 2)
640581a0 8438 gen_helper_add_saturate(tmp2, cpu_env, tmp2, tmp2);
9ee6e8bb 8439 if (op1 & 1)
9ef39277 8440 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8441 else
9ef39277 8442 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8443 tcg_temp_free_i32(tmp2);
5e3f878a 8444 store_reg(s, rd, tmp);
9ee6e8bb 8445 break;
55c544ed
PM
8446 case 0x6: /* ERET */
8447 if (op1 != 3) {
8448 goto illegal_op;
8449 }
8450 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
8451 goto illegal_op;
8452 }
8453 if ((insn & 0x000fff0f) != 0x0000000e) {
8454 /* UNPREDICTABLE; we choose to UNDEF */
8455 goto illegal_op;
8456 }
8457
8458 if (s->current_el == 2) {
8459 tmp = load_cpu_field(elr_el[2]);
8460 } else {
8461 tmp = load_reg(s, 14);
8462 }
8463 gen_exception_return(s, tmp);
8464 break;
49e14940 8465 case 7:
d4a2dc67
PM
8466 {
8467 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8468 switch (op1) {
19a6e31c
PM
8469 case 0:
8470 /* HLT */
8471 gen_hlt(s, imm16);
8472 break;
37e6456e
PM
8473 case 1:
8474 /* bkpt */
8475 ARCH(5);
06bcbda3 8476 gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm16, false));
37e6456e
PM
8477 break;
8478 case 2:
8479 /* Hypervisor call (v7) */
8480 ARCH(7);
8481 if (IS_USER(s)) {
8482 goto illegal_op;
8483 }
8484 gen_hvc(s, imm16);
8485 break;
8486 case 3:
8487 /* Secure monitor call (v6+) */
8488 ARCH(6K);
8489 if (IS_USER(s)) {
8490 goto illegal_op;
8491 }
8492 gen_smc(s);
8493 break;
8494 default:
19a6e31c 8495 g_assert_not_reached();
49e14940 8496 }
9ee6e8bb 8497 break;
d4a2dc67 8498 }
9ee6e8bb
PB
8499 case 0x8: /* signed multiply */
8500 case 0xa:
8501 case 0xc:
8502 case 0xe:
be5e7a76 8503 ARCH(5TE);
9ee6e8bb
PB
8504 rs = (insn >> 8) & 0xf;
8505 rn = (insn >> 12) & 0xf;
8506 rd = (insn >> 16) & 0xf;
8507 if (op1 == 1) {
8508 /* (32 * 16) >> 16 */
5e3f878a
PB
8509 tmp = load_reg(s, rm);
8510 tmp2 = load_reg(s, rs);
9ee6e8bb 8511 if (sh & 4)
5e3f878a 8512 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8513 else
5e3f878a 8514 gen_sxth(tmp2);
a7812ae4
PB
8515 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8516 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8517 tmp = tcg_temp_new_i32();
ecc7b3aa 8518 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8519 tcg_temp_free_i64(tmp64);
9ee6e8bb 8520 if ((sh & 2) == 0) {
5e3f878a 8521 tmp2 = load_reg(s, rn);
9ef39277 8522 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8523 tcg_temp_free_i32(tmp2);
9ee6e8bb 8524 }
5e3f878a 8525 store_reg(s, rd, tmp);
9ee6e8bb
PB
8526 } else {
8527 /* 16 * 16 */
5e3f878a
PB
8528 tmp = load_reg(s, rm);
8529 tmp2 = load_reg(s, rs);
8530 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8531 tcg_temp_free_i32(tmp2);
9ee6e8bb 8532 if (op1 == 2) {
a7812ae4
PB
8533 tmp64 = tcg_temp_new_i64();
8534 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8535 tcg_temp_free_i32(tmp);
a7812ae4
PB
8536 gen_addq(s, tmp64, rn, rd);
8537 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8538 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8539 } else {
8540 if (op1 == 0) {
5e3f878a 8541 tmp2 = load_reg(s, rn);
9ef39277 8542 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8543 tcg_temp_free_i32(tmp2);
9ee6e8bb 8544 }
5e3f878a 8545 store_reg(s, rd, tmp);
9ee6e8bb
PB
8546 }
8547 }
8548 break;
8549 default:
8550 goto illegal_op;
8551 }
8552 } else if (((insn & 0x0e000000) == 0 &&
8553 (insn & 0x00000090) != 0x90) ||
8554 ((insn & 0x0e000000) == (1 << 25))) {
581c6ebd
RH
8555 /* Data-processing (reg, reg-shift-reg, imm). */
8556 /* All done in decodetree. Reach here for illegal ops. */
8557 goto illegal_op;
9ee6e8bb
PB
8558 } else {
8559 /* other instructions */
8560 op1 = (insn >> 24) & 0xf;
8561 switch(op1) {
8562 case 0x0:
8563 case 0x1:
8564 /* multiplies, extra load/stores */
8565 sh = (insn >> 5) & 3;
8566 if (sh == 0) {
8567 if (op1 == 0x0) {
8568 rd = (insn >> 16) & 0xf;
8569 rn = (insn >> 12) & 0xf;
8570 rs = (insn >> 8) & 0xf;
8571 rm = (insn) & 0xf;
8572 op1 = (insn >> 20) & 0xf;
8573 switch (op1) {
8574 case 0: case 1: case 2: case 3: case 6:
8575 /* 32 bit mul */
5e3f878a
PB
8576 tmp = load_reg(s, rs);
8577 tmp2 = load_reg(s, rm);
8578 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8579 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8580 if (insn & (1 << 22)) {
8581 /* Subtract (mls) */
8582 ARCH(6T2);
5e3f878a
PB
8583 tmp2 = load_reg(s, rn);
8584 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8585 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8586 } else if (insn & (1 << 21)) {
8587 /* Add */
5e3f878a
PB
8588 tmp2 = load_reg(s, rn);
8589 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8590 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8591 }
8592 if (insn & (1 << 20))
5e3f878a
PB
8593 gen_logic_CC(tmp);
8594 store_reg(s, rd, tmp);
9ee6e8bb 8595 break;
8aac08b1
AJ
8596 case 4:
8597 /* 64 bit mul double accumulate (UMAAL) */
8598 ARCH(6);
8599 tmp = load_reg(s, rs);
8600 tmp2 = load_reg(s, rm);
8601 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8602 gen_addq_lo(s, tmp64, rn);
8603 gen_addq_lo(s, tmp64, rd);
8604 gen_storeq_reg(s, rn, rd, tmp64);
8605 tcg_temp_free_i64(tmp64);
8606 break;
8607 case 8: case 9: case 10: case 11:
8608 case 12: case 13: case 14: case 15:
8609 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8610 tmp = load_reg(s, rs);
8611 tmp2 = load_reg(s, rm);
8aac08b1 8612 if (insn & (1 << 22)) {
c9f10124 8613 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8614 } else {
c9f10124 8615 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8616 }
8617 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8618 TCGv_i32 al = load_reg(s, rn);
8619 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8620 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8621 tcg_temp_free_i32(al);
8622 tcg_temp_free_i32(ah);
9ee6e8bb 8623 }
8aac08b1 8624 if (insn & (1 << 20)) {
c9f10124 8625 gen_logicq_cc(tmp, tmp2);
8aac08b1 8626 }
c9f10124
RH
8627 store_reg(s, rn, tmp);
8628 store_reg(s, rd, tmp2);
9ee6e8bb 8629 break;
8aac08b1
AJ
8630 default:
8631 goto illegal_op;
9ee6e8bb
PB
8632 }
8633 } else {
8634 rn = (insn >> 16) & 0xf;
8635 rd = (insn >> 12) & 0xf;
8636 if (insn & (1 << 23)) {
8637 /* load/store exclusive */
96c55295
PM
8638 bool is_ld = extract32(insn, 20, 1);
8639 bool is_lasr = !extract32(insn, 8, 1);
2359bf80 8640 int op2 = (insn >> 8) & 3;
86753403 8641 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8642
8643 switch (op2) {
8644 case 0: /* lda/stl */
8645 if (op1 == 1) {
8646 goto illegal_op;
8647 }
8648 ARCH(8);
8649 break;
8650 case 1: /* reserved */
8651 goto illegal_op;
8652 case 2: /* ldaex/stlex */
8653 ARCH(8);
8654 break;
8655 case 3: /* ldrex/strex */
8656 if (op1) {
8657 ARCH(6K);
8658 } else {
8659 ARCH(6);
8660 }
8661 break;
8662 }
8663
3174f8e9 8664 addr = tcg_temp_local_new_i32();
98a46317 8665 load_reg_var(s, addr, rn);
2359bf80 8666
96c55295
PM
8667 if (is_lasr && !is_ld) {
8668 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8669 }
8670
2359bf80 8671 if (op2 == 0) {
96c55295 8672 if (is_ld) {
2359bf80
MR
8673 tmp = tcg_temp_new_i32();
8674 switch (op1) {
8675 case 0: /* lda */
9bb6558a
PM
8676 gen_aa32_ld32u_iss(s, tmp, addr,
8677 get_mem_index(s),
8678 rd | ISSIsAcqRel);
2359bf80
MR
8679 break;
8680 case 2: /* ldab */
9bb6558a
PM
8681 gen_aa32_ld8u_iss(s, tmp, addr,
8682 get_mem_index(s),
8683 rd | ISSIsAcqRel);
2359bf80
MR
8684 break;
8685 case 3: /* ldah */
9bb6558a
PM
8686 gen_aa32_ld16u_iss(s, tmp, addr,
8687 get_mem_index(s),
8688 rd | ISSIsAcqRel);
2359bf80
MR
8689 break;
8690 default:
8691 abort();
8692 }
8693 store_reg(s, rd, tmp);
8694 } else {
8695 rm = insn & 0xf;
8696 tmp = load_reg(s, rm);
8697 switch (op1) {
8698 case 0: /* stl */
9bb6558a
PM
8699 gen_aa32_st32_iss(s, tmp, addr,
8700 get_mem_index(s),
8701 rm | ISSIsAcqRel);
2359bf80
MR
8702 break;
8703 case 2: /* stlb */
9bb6558a
PM
8704 gen_aa32_st8_iss(s, tmp, addr,
8705 get_mem_index(s),
8706 rm | ISSIsAcqRel);
2359bf80
MR
8707 break;
8708 case 3: /* stlh */
9bb6558a
PM
8709 gen_aa32_st16_iss(s, tmp, addr,
8710 get_mem_index(s),
8711 rm | ISSIsAcqRel);
2359bf80
MR
8712 break;
8713 default:
8714 abort();
8715 }
8716 tcg_temp_free_i32(tmp);
8717 }
96c55295 8718 } else if (is_ld) {
86753403
PB
8719 switch (op1) {
8720 case 0: /* ldrex */
426f5abc 8721 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8722 break;
8723 case 1: /* ldrexd */
426f5abc 8724 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8725 break;
8726 case 2: /* ldrexb */
426f5abc 8727 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8728 break;
8729 case 3: /* ldrexh */
426f5abc 8730 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8731 break;
8732 default:
8733 abort();
8734 }
9ee6e8bb
PB
8735 } else {
8736 rm = insn & 0xf;
86753403
PB
8737 switch (op1) {
8738 case 0: /* strex */
426f5abc 8739 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8740 break;
8741 case 1: /* strexd */
502e64fe 8742 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8743 break;
8744 case 2: /* strexb */
426f5abc 8745 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8746 break;
8747 case 3: /* strexh */
426f5abc 8748 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8749 break;
8750 default:
8751 abort();
8752 }
9ee6e8bb 8753 }
39d5492a 8754 tcg_temp_free_i32(addr);
96c55295
PM
8755
8756 if (is_lasr && is_ld) {
8757 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
8758 }
c4869ca6
OS
8759 } else if ((insn & 0x00300f00) == 0) {
8760 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
8761 * - SWP, SWPB
8762 */
8763
cf12bce0 8764 TCGv taddr;
14776ab5 8765 MemOp opc = s->be_data;
cf12bce0 8766
9ee6e8bb
PB
8767 rm = (insn) & 0xf;
8768
9ee6e8bb 8769 if (insn & (1 << 22)) {
cf12bce0 8770 opc |= MO_UB;
9ee6e8bb 8771 } else {
cf12bce0 8772 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 8773 }
cf12bce0
EC
8774
8775 addr = load_reg(s, rn);
8776 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 8777 tcg_temp_free_i32(addr);
cf12bce0
EC
8778
8779 tmp = load_reg(s, rm);
8780 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8781 get_mem_index(s), opc);
8782 tcg_temp_free(taddr);
8783 store_reg(s, rd, tmp);
c4869ca6
OS
8784 } else {
8785 goto illegal_op;
9ee6e8bb
PB
8786 }
8787 }
8788 } else {
8789 int address_offset;
3960c336 8790 bool load = insn & (1 << 20);
63f26fcf
PM
8791 bool wbit = insn & (1 << 21);
8792 bool pbit = insn & (1 << 24);
3960c336 8793 bool doubleword = false;
9bb6558a
PM
8794 ISSInfo issinfo;
8795
9ee6e8bb
PB
8796 /* Misc load/store */
8797 rn = (insn >> 16) & 0xf;
8798 rd = (insn >> 12) & 0xf;
3960c336 8799
9bb6558a
PM
8800 /* ISS not valid if writeback */
8801 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
8802
3960c336
PM
8803 if (!load && (sh & 2)) {
8804 /* doubleword */
8805 ARCH(5TE);
8806 if (rd & 1) {
8807 /* UNPREDICTABLE; we choose to UNDEF */
8808 goto illegal_op;
8809 }
8810 load = (sh & 1) == 0;
8811 doubleword = true;
8812 }
8813
b0109805 8814 addr = load_reg(s, rn);
63f26fcf 8815 if (pbit) {
b0109805 8816 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 8817 }
9ee6e8bb 8818 address_offset = 0;
3960c336
PM
8819
8820 if (doubleword) {
8821 if (!load) {
9ee6e8bb 8822 /* store */
b0109805 8823 tmp = load_reg(s, rd);
12dcc321 8824 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8825 tcg_temp_free_i32(tmp);
b0109805
PB
8826 tcg_gen_addi_i32(addr, addr, 4);
8827 tmp = load_reg(s, rd + 1);
12dcc321 8828 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8829 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8830 } else {
8831 /* load */
5a839c0d 8832 tmp = tcg_temp_new_i32();
12dcc321 8833 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
8834 store_reg(s, rd, tmp);
8835 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8836 tmp = tcg_temp_new_i32();
12dcc321 8837 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8838 rd++;
9ee6e8bb
PB
8839 }
8840 address_offset = -4;
3960c336
PM
8841 } else if (load) {
8842 /* load */
8843 tmp = tcg_temp_new_i32();
8844 switch (sh) {
8845 case 1:
9bb6558a
PM
8846 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
8847 issinfo);
3960c336
PM
8848 break;
8849 case 2:
9bb6558a
PM
8850 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
8851 issinfo);
3960c336
PM
8852 break;
8853 default:
8854 case 3:
9bb6558a
PM
8855 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
8856 issinfo);
3960c336
PM
8857 break;
8858 }
9ee6e8bb
PB
8859 } else {
8860 /* store */
b0109805 8861 tmp = load_reg(s, rd);
9bb6558a 8862 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 8863 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8864 }
8865 /* Perform base writeback before the loaded value to
8866 ensure correct behavior with overlapping index registers.
b6af0975 8867 ldrd with base writeback is undefined if the
9ee6e8bb 8868 destination and index registers overlap. */
63f26fcf 8869 if (!pbit) {
b0109805
PB
8870 gen_add_datah_offset(s, insn, address_offset, addr);
8871 store_reg(s, rn, addr);
63f26fcf 8872 } else if (wbit) {
9ee6e8bb 8873 if (address_offset)
b0109805
PB
8874 tcg_gen_addi_i32(addr, addr, address_offset);
8875 store_reg(s, rn, addr);
8876 } else {
7d1b0095 8877 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8878 }
8879 if (load) {
8880 /* Complete the load. */
b0109805 8881 store_reg(s, rd, tmp);
9ee6e8bb
PB
8882 }
8883 }
8884 break;
8885 case 0x4:
8886 case 0x5:
8887 goto do_ldst;
8888 case 0x6:
8889 case 0x7:
8890 if (insn & (1 << 4)) {
8891 ARCH(6);
8892 /* Armv6 Media instructions. */
8893 rm = insn & 0xf;
8894 rn = (insn >> 16) & 0xf;
2c0262af 8895 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8896 rs = (insn >> 8) & 0xf;
8897 switch ((insn >> 23) & 3) {
8898 case 0: /* Parallel add/subtract. */
8899 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8900 tmp = load_reg(s, rn);
8901 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8902 sh = (insn >> 5) & 7;
8903 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8904 goto illegal_op;
6ddbc6e4 8905 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8906 tcg_temp_free_i32(tmp2);
6ddbc6e4 8907 store_reg(s, rd, tmp);
9ee6e8bb
PB
8908 break;
8909 case 1:
8910 if ((insn & 0x00700020) == 0) {
6c95676b 8911 /* Halfword pack. */
3670669c
PB
8912 tmp = load_reg(s, rn);
8913 tmp2 = load_reg(s, rm);
9ee6e8bb 8914 shift = (insn >> 7) & 0x1f;
3670669c
PB
8915 if (insn & (1 << 6)) {
8916 /* pkhtb */
d1f8755f 8917 if (shift == 0) {
22478e79 8918 shift = 31;
d1f8755f 8919 }
22478e79 8920 tcg_gen_sari_i32(tmp2, tmp2, shift);
d1f8755f 8921 tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
3670669c
PB
8922 } else {
8923 /* pkhbt */
d1f8755f
RH
8924 tcg_gen_shli_i32(tmp2, tmp2, shift);
8925 tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
3670669c 8926 }
7d1b0095 8927 tcg_temp_free_i32(tmp2);
3670669c 8928 store_reg(s, rd, tmp);
9ee6e8bb
PB
8929 } else if ((insn & 0x00200020) == 0x00200000) {
8930 /* [us]sat */
6ddbc6e4 8931 tmp = load_reg(s, rm);
9ee6e8bb
PB
8932 shift = (insn >> 7) & 0x1f;
8933 if (insn & (1 << 6)) {
8934 if (shift == 0)
8935 shift = 31;
6ddbc6e4 8936 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8937 } else {
6ddbc6e4 8938 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8939 }
8940 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8941 tmp2 = tcg_const_i32(sh);
8942 if (insn & (1 << 22))
9ef39277 8943 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8944 else
9ef39277 8945 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8946 tcg_temp_free_i32(tmp2);
6ddbc6e4 8947 store_reg(s, rd, tmp);
9ee6e8bb
PB
8948 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8949 /* [us]sat16 */
6ddbc6e4 8950 tmp = load_reg(s, rm);
9ee6e8bb 8951 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8952 tmp2 = tcg_const_i32(sh);
8953 if (insn & (1 << 22))
9ef39277 8954 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8955 else
9ef39277 8956 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8957 tcg_temp_free_i32(tmp2);
6ddbc6e4 8958 store_reg(s, rd, tmp);
9ee6e8bb
PB
8959 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8960 /* Select bytes. */
6ddbc6e4
PB
8961 tmp = load_reg(s, rn);
8962 tmp2 = load_reg(s, rm);
7d1b0095 8963 tmp3 = tcg_temp_new_i32();
0ecb72a5 8964 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8965 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8966 tcg_temp_free_i32(tmp3);
8967 tcg_temp_free_i32(tmp2);
6ddbc6e4 8968 store_reg(s, rd, tmp);
9ee6e8bb 8969 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8970 tmp = load_reg(s, rm);
9ee6e8bb 8971 shift = (insn >> 10) & 3;
1301f322 8972 /* ??? In many cases it's not necessary to do a
9ee6e8bb 8973 rotate, a shift is sufficient. */
464eaa95 8974 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8975 op1 = (insn >> 20) & 7;
8976 switch (op1) {
5e3f878a
PB
8977 case 0: gen_sxtb16(tmp); break;
8978 case 2: gen_sxtb(tmp); break;
8979 case 3: gen_sxth(tmp); break;
8980 case 4: gen_uxtb16(tmp); break;
8981 case 6: gen_uxtb(tmp); break;
8982 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
8983 default: goto illegal_op;
8984 }
8985 if (rn != 15) {
5e3f878a 8986 tmp2 = load_reg(s, rn);
9ee6e8bb 8987 if ((op1 & 3) == 0) {
5e3f878a 8988 gen_add16(tmp, tmp2);
9ee6e8bb 8989 } else {
5e3f878a 8990 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8991 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8992 }
8993 }
6c95676b 8994 store_reg(s, rd, tmp);
9ee6e8bb
PB
8995 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8996 /* rev */
b0109805 8997 tmp = load_reg(s, rm);
9ee6e8bb
PB
8998 if (insn & (1 << 22)) {
8999 if (insn & (1 << 7)) {
b0109805 9000 gen_revsh(tmp);
9ee6e8bb
PB
9001 } else {
9002 ARCH(6T2);
b0109805 9003 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9004 }
9005 } else {
9006 if (insn & (1 << 7))
b0109805 9007 gen_rev16(tmp);
9ee6e8bb 9008 else
66896cb8 9009 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9010 }
b0109805 9011 store_reg(s, rd, tmp);
9ee6e8bb
PB
9012 } else {
9013 goto illegal_op;
9014 }
9015 break;
9016 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9017 switch ((insn >> 20) & 0x7) {
9018 case 5:
9019 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9020 /* op2 not 00x or 11x : UNDEF */
9021 goto illegal_op;
9022 }
838fa72d
AJ
9023 /* Signed multiply most significant [accumulate].
9024 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9025 tmp = load_reg(s, rm);
9026 tmp2 = load_reg(s, rs);
5f8cd06e 9027 tcg_gen_muls2_i32(tmp2, tmp, tmp, tmp2);
838fa72d 9028
955a7dd5 9029 if (rd != 15) {
5f8cd06e 9030 tmp3 = load_reg(s, rd);
9ee6e8bb 9031 if (insn & (1 << 6)) {
e0a0c832
RH
9032 /*
9033 * For SMMLS, we need a 64-bit subtract.
9034 * Borrow caused by a non-zero multiplicand
9035 * lowpart, and the correct result lowpart
9036 * for rounding.
9037 */
9038 TCGv_i32 zero = tcg_const_i32(0);
9039 tcg_gen_sub2_i32(tmp2, tmp, zero, tmp3,
9040 tmp2, tmp);
9041 tcg_temp_free_i32(zero);
9ee6e8bb 9042 } else {
5f8cd06e 9043 tcg_gen_add_i32(tmp, tmp, tmp3);
9ee6e8bb 9044 }
5f8cd06e 9045 tcg_temp_free_i32(tmp3);
9ee6e8bb 9046 }
838fa72d 9047 if (insn & (1 << 5)) {
5f8cd06e
RH
9048 /*
9049 * Adding 0x80000000 to the 64-bit quantity
9050 * means that we have carry in to the high
9051 * word when the low word has the high bit set.
9052 */
9053 tcg_gen_shri_i32(tmp2, tmp2, 31);
9054 tcg_gen_add_i32(tmp, tmp, tmp2);
838fa72d 9055 }
5f8cd06e 9056 tcg_temp_free_i32(tmp2);
955a7dd5 9057 store_reg(s, rn, tmp);
41e9564d
PM
9058 break;
9059 case 0:
9060 case 4:
9061 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9062 if (insn & (1 << 7)) {
9063 goto illegal_op;
9064 }
9065 tmp = load_reg(s, rm);
9066 tmp2 = load_reg(s, rs);
9ee6e8bb 9067 if (insn & (1 << 5))
5e3f878a
PB
9068 gen_swap_half(tmp2);
9069 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9070 if (insn & (1 << 22)) {
5e3f878a 9071 /* smlald, smlsld */
33bbd75a
PC
9072 TCGv_i64 tmp64_2;
9073
a7812ae4 9074 tmp64 = tcg_temp_new_i64();
33bbd75a 9075 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9076 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9077 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9078 tcg_temp_free_i32(tmp);
33bbd75a
PC
9079 tcg_temp_free_i32(tmp2);
9080 if (insn & (1 << 6)) {
9081 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9082 } else {
9083 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9084 }
9085 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9086 gen_addq(s, tmp64, rd, rn);
9087 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9088 tcg_temp_free_i64(tmp64);
9ee6e8bb 9089 } else {
5e3f878a 9090 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9091 if (insn & (1 << 6)) {
9092 /* This subtraction cannot overflow. */
9093 tcg_gen_sub_i32(tmp, tmp, tmp2);
9094 } else {
9095 /* This addition cannot overflow 32 bits;
9096 * however it may overflow considered as a
9097 * signed operation, in which case we must set
9098 * the Q flag.
9099 */
9100 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9101 }
9102 tcg_temp_free_i32(tmp2);
22478e79 9103 if (rd != 15)
9ee6e8bb 9104 {
22478e79 9105 tmp2 = load_reg(s, rd);
9ef39277 9106 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9107 tcg_temp_free_i32(tmp2);
9ee6e8bb 9108 }
22478e79 9109 store_reg(s, rn, tmp);
9ee6e8bb 9110 }
41e9564d 9111 break;
b8b8ea05
PM
9112 case 1:
9113 case 3:
9114 /* SDIV, UDIV */
7e0cf8b4 9115 if (!dc_isar_feature(arm_div, s)) {
b8b8ea05
PM
9116 goto illegal_op;
9117 }
9118 if (((insn >> 5) & 7) || (rd != 15)) {
9119 goto illegal_op;
9120 }
9121 tmp = load_reg(s, rm);
9122 tmp2 = load_reg(s, rs);
9123 if (insn & (1 << 21)) {
9124 gen_helper_udiv(tmp, tmp, tmp2);
9125 } else {
9126 gen_helper_sdiv(tmp, tmp, tmp2);
9127 }
9128 tcg_temp_free_i32(tmp2);
9129 store_reg(s, rn, tmp);
9130 break;
41e9564d
PM
9131 default:
9132 goto illegal_op;
9ee6e8bb
PB
9133 }
9134 break;
9135 case 3:
9136 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9137 switch (op1) {
9138 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9139 ARCH(6);
9140 tmp = load_reg(s, rm);
9141 tmp2 = load_reg(s, rs);
9142 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9143 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9144 if (rd != 15) {
9145 tmp2 = load_reg(s, rd);
6ddbc6e4 9146 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9147 tcg_temp_free_i32(tmp2);
9ee6e8bb 9148 }
ded9d295 9149 store_reg(s, rn, tmp);
9ee6e8bb
PB
9150 break;
9151 case 0x20: case 0x24: case 0x28: case 0x2c:
9152 /* Bitfield insert/clear. */
9153 ARCH(6T2);
9154 shift = (insn >> 7) & 0x1f;
9155 i = (insn >> 16) & 0x1f;
45140a57
KB
9156 if (i < shift) {
9157 /* UNPREDICTABLE; we choose to UNDEF */
9158 goto illegal_op;
9159 }
9ee6e8bb
PB
9160 i = i + 1 - shift;
9161 if (rm == 15) {
7d1b0095 9162 tmp = tcg_temp_new_i32();
5e3f878a 9163 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9164 } else {
5e3f878a 9165 tmp = load_reg(s, rm);
9ee6e8bb
PB
9166 }
9167 if (i != 32) {
5e3f878a 9168 tmp2 = load_reg(s, rd);
d593c48e 9169 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9170 tcg_temp_free_i32(tmp2);
9ee6e8bb 9171 }
5e3f878a 9172 store_reg(s, rd, tmp);
9ee6e8bb
PB
9173 break;
9174 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9175 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9176 ARCH(6T2);
5e3f878a 9177 tmp = load_reg(s, rm);
9ee6e8bb
PB
9178 shift = (insn >> 7) & 0x1f;
9179 i = ((insn >> 16) & 0x1f) + 1;
9180 if (shift + i > 32)
9181 goto illegal_op;
9182 if (i < 32) {
9183 if (op1 & 0x20) {
59a71b4c 9184 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9185 } else {
59a71b4c 9186 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9187 }
9188 }
5e3f878a 9189 store_reg(s, rd, tmp);
9ee6e8bb
PB
9190 break;
9191 default:
9192 goto illegal_op;
9193 }
9194 break;
9195 }
9196 break;
9197 }
9198 do_ldst:
9199 /* Check for undefined extension instructions
9200 * per the ARM Bible IE:
9201 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9202 */
9203 sh = (0xf << 20) | (0xf << 4);
9204 if (op1 == 0x7 && ((insn & sh) == sh))
9205 {
9206 goto illegal_op;
9207 }
9208 /* load/store byte/word */
9209 rn = (insn >> 16) & 0xf;
9210 rd = (insn >> 12) & 0xf;
b0109805 9211 tmp2 = load_reg(s, rn);
a99caa48
PM
9212 if ((insn & 0x01200000) == 0x00200000) {
9213 /* ldrt/strt */
579d21cc 9214 i = get_a32_user_mem_index(s);
a99caa48
PM
9215 } else {
9216 i = get_mem_index(s);
9217 }
9ee6e8bb 9218 if (insn & (1 << 24))
b0109805 9219 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9220 if (insn & (1 << 20)) {
9221 /* load */
5a839c0d 9222 tmp = tcg_temp_new_i32();
9ee6e8bb 9223 if (insn & (1 << 22)) {
9bb6558a 9224 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9225 } else {
9bb6558a 9226 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9227 }
9ee6e8bb
PB
9228 } else {
9229 /* store */
b0109805 9230 tmp = load_reg(s, rd);
5a839c0d 9231 if (insn & (1 << 22)) {
9bb6558a 9232 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9233 } else {
9bb6558a 9234 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9235 }
9236 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9237 }
9238 if (!(insn & (1 << 24))) {
b0109805
PB
9239 gen_add_data_offset(s, insn, tmp2);
9240 store_reg(s, rn, tmp2);
9241 } else if (insn & (1 << 21)) {
9242 store_reg(s, rn, tmp2);
9243 } else {
7d1b0095 9244 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9245 }
9246 if (insn & (1 << 20)) {
9247 /* Complete the load. */
7dcc1f89 9248 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9249 }
9250 break;
9251 case 0x08:
9252 case 0x09:
9253 {
da3e53dd
PM
9254 int j, n, loaded_base;
9255 bool exc_return = false;
9256 bool is_load = extract32(insn, 20, 1);
9257 bool user = false;
39d5492a 9258 TCGv_i32 loaded_var;
9ee6e8bb
PB
9259 /* load/store multiple words */
9260 /* XXX: store correct base if write back */
9ee6e8bb 9261 if (insn & (1 << 22)) {
da3e53dd 9262 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9263 if (IS_USER(s))
9264 goto illegal_op; /* only usable in supervisor mode */
9265
da3e53dd
PM
9266 if (is_load && extract32(insn, 15, 1)) {
9267 exc_return = true;
9268 } else {
9269 user = true;
9270 }
9ee6e8bb
PB
9271 }
9272 rn = (insn >> 16) & 0xf;
b0109805 9273 addr = load_reg(s, rn);
9ee6e8bb
PB
9274
9275 /* compute total size */
9276 loaded_base = 0;
f764718d 9277 loaded_var = NULL;
9ee6e8bb 9278 n = 0;
9798ac71 9279 for (i = 0; i < 16; i++) {
9ee6e8bb
PB
9280 if (insn & (1 << i))
9281 n++;
9282 }
9283 /* XXX: test invalid n == 0 case ? */
9284 if (insn & (1 << 23)) {
9285 if (insn & (1 << 24)) {
9286 /* pre increment */
b0109805 9287 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9288 } else {
9289 /* post increment */
9290 }
9291 } else {
9292 if (insn & (1 << 24)) {
9293 /* pre decrement */
b0109805 9294 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9295 } else {
9296 /* post decrement */
9297 if (n != 1)
b0109805 9298 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9299 }
9300 }
9301 j = 0;
9798ac71 9302 for (i = 0; i < 16; i++) {
9ee6e8bb 9303 if (insn & (1 << i)) {
da3e53dd 9304 if (is_load) {
9ee6e8bb 9305 /* load */
5a839c0d 9306 tmp = tcg_temp_new_i32();
12dcc321 9307 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9308 if (user) {
b75263d6 9309 tmp2 = tcg_const_i32(i);
1ce94f81 9310 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9311 tcg_temp_free_i32(tmp2);
7d1b0095 9312 tcg_temp_free_i32(tmp);
9ee6e8bb 9313 } else if (i == rn) {
b0109805 9314 loaded_var = tmp;
9ee6e8bb 9315 loaded_base = 1;
9d090d17 9316 } else if (i == 15 && exc_return) {
fb0e8e79 9317 store_pc_exc_ret(s, tmp);
9ee6e8bb 9318 } else {
7dcc1f89 9319 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9320 }
9321 } else {
9322 /* store */
9323 if (i == 15) {
7d1b0095 9324 tmp = tcg_temp_new_i32();
fdbcf632 9325 tcg_gen_movi_i32(tmp, read_pc(s));
9ee6e8bb 9326 } else if (user) {
7d1b0095 9327 tmp = tcg_temp_new_i32();
b75263d6 9328 tmp2 = tcg_const_i32(i);
9ef39277 9329 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9330 tcg_temp_free_i32(tmp2);
9ee6e8bb 9331 } else {
b0109805 9332 tmp = load_reg(s, i);
9ee6e8bb 9333 }
12dcc321 9334 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9335 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9336 }
9337 j++;
9338 /* no need to add after the last transfer */
9339 if (j != n)
b0109805 9340 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9341 }
9342 }
9343 if (insn & (1 << 21)) {
9344 /* write back */
9345 if (insn & (1 << 23)) {
9346 if (insn & (1 << 24)) {
9347 /* pre increment */
9348 } else {
9349 /* post increment */
b0109805 9350 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9351 }
9352 } else {
9353 if (insn & (1 << 24)) {
9354 /* pre decrement */
9355 if (n != 1)
b0109805 9356 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9357 } else {
9358 /* post decrement */
b0109805 9359 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9360 }
9361 }
b0109805
PB
9362 store_reg(s, rn, addr);
9363 } else {
7d1b0095 9364 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9365 }
9366 if (loaded_base) {
b0109805 9367 store_reg(s, rn, loaded_var);
9ee6e8bb 9368 }
da3e53dd 9369 if (exc_return) {
9ee6e8bb 9370 /* Restore CPSR from SPSR. */
d9ba4830 9371 tmp = load_cpu_field(spsr);
e69ad9df
AL
9372 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9373 gen_io_start();
9374 }
235ea1f5 9375 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9376 tcg_temp_free_i32(tmp);
b29fd33d 9377 /* Must exit loop to check un-masked IRQs */
dcba3a8d 9378 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
9379 }
9380 }
9381 break;
9382 case 0xa:
9383 case 0xb:
9384 {
9385 int32_t offset;
9386
9387 /* branch (and link) */
9ee6e8bb 9388 if (insn & (1 << 24)) {
7d1b0095 9389 tmp = tcg_temp_new_i32();
a0415916 9390 tcg_gen_movi_i32(tmp, s->base.pc_next);
5e3f878a 9391 store_reg(s, 14, tmp);
9ee6e8bb 9392 }
534df156 9393 offset = sextract32(insn << 2, 0, 26);
fdbcf632 9394 gen_jmp(s, read_pc(s) + offset);
9ee6e8bb
PB
9395 }
9396 break;
9397 case 0xc:
9398 case 0xd:
9399 case 0xe:
6a57f3eb
WN
9400 if (((insn >> 8) & 0xe) == 10) {
9401 /* VFP. */
7dcc1f89 9402 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9403 goto illegal_op;
9404 }
7dcc1f89 9405 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9406 /* Coprocessor. */
9ee6e8bb 9407 goto illegal_op;
6a57f3eb 9408 }
9ee6e8bb
PB
9409 break;
9410 case 0xf:
9411 /* swi */
a0415916 9412 gen_set_pc_im(s, s->base.pc_next);
d4a2dc67 9413 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 9414 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
9415 break;
9416 default:
9417 illegal_op:
1ce21ba1 9418 unallocated_encoding(s);
9ee6e8bb
PB
9419 break;
9420 }
9421 }
9422}
9423
331b1ca6 9424static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
296e5a0a 9425{
331b1ca6
RH
9426 /*
9427 * Return true if this is a 16 bit instruction. We must be precise
9428 * about this (matching the decode).
296e5a0a
PM
9429 */
9430 if ((insn >> 11) < 0x1d) {
9431 /* Definitely a 16-bit instruction */
9432 return true;
9433 }
9434
9435 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9436 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9437 * end up actually treating this as two 16-bit insns, though,
9438 * if it's half of a bl/blx pair that might span a page boundary.
9439 */
14120108
JS
9440 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
9441 arm_dc_feature(s, ARM_FEATURE_M)) {
296e5a0a
PM
9442 /* Thumb2 cores (including all M profile ones) always treat
9443 * 32-bit insns as 32-bit.
9444 */
9445 return false;
9446 }
9447
331b1ca6 9448 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
296e5a0a
PM
9449 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9450 * is not on the next page; we merge this into a 32-bit
9451 * insn.
9452 */
9453 return false;
9454 }
9455 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9456 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9457 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
9458 * -- handle as single 16 bit insn
9459 */
9460 return true;
9461}
9462
2eea841c
PM
9463/* Translate a 32-bit thumb instruction. */
9464static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 9465{
296e5a0a 9466 uint32_t imm, shift, offset;
9ee6e8bb 9467 uint32_t rd, rn, rm, rs;
39d5492a
PM
9468 TCGv_i32 tmp;
9469 TCGv_i32 tmp2;
9470 TCGv_i32 tmp3;
9471 TCGv_i32 addr;
a7812ae4 9472 TCGv_i64 tmp64;
9ee6e8bb 9473 int op;
9ee6e8bb 9474
14120108
JS
9475 /*
9476 * ARMv6-M supports a limited subset of Thumb2 instructions.
9477 * Other Thumb1 architectures allow only 32-bit
9478 * combined BL/BLX prefix and suffix.
296e5a0a 9479 */
14120108
JS
9480 if (arm_dc_feature(s, ARM_FEATURE_M) &&
9481 !arm_dc_feature(s, ARM_FEATURE_V7)) {
9482 int i;
9483 bool found = false;
8297cb13
JS
9484 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
9485 0xf3b08040 /* dsb */,
9486 0xf3b08050 /* dmb */,
9487 0xf3b08060 /* isb */,
9488 0xf3e08000 /* mrs */,
9489 0xf000d000 /* bl */};
9490 static const uint32_t armv6m_mask[] = {0xffe0d000,
9491 0xfff0d0f0,
9492 0xfff0d0f0,
9493 0xfff0d0f0,
9494 0xffe0d000,
9495 0xf800d000};
14120108
JS
9496
9497 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
9498 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
9499 found = true;
9500 break;
9501 }
9502 }
9503 if (!found) {
9504 goto illegal_op;
9505 }
9506 } else if ((insn & 0xf800e800) != 0xf000e800) {
9ee6e8bb
PB
9507 ARCH(6T2);
9508 }
9509
51409b9e
RH
9510 if (disas_t32(s, insn)) {
9511 return;
9512 }
9513 /* fall back to legacy decoder */
9514
9ee6e8bb
PB
9515 rn = (insn >> 16) & 0xf;
9516 rs = (insn >> 12) & 0xf;
9517 rd = (insn >> 8) & 0xf;
9518 rm = insn & 0xf;
9519 switch ((insn >> 25) & 0xf) {
9520 case 0: case 1: case 2: case 3:
9521 /* 16-bit instructions. Should never happen. */
9522 abort();
9523 case 4:
9524 if (insn & (1 << 22)) {
ebfe27c5
PM
9525 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9526 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 9527 * table branch, TT.
ebfe27c5 9528 */
76eff04d
PM
9529 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
9530 arm_dc_feature(s, ARM_FEATURE_V8)) {
9531 /* 0b1110_1001_0111_1111_1110_1001_0111_111
9532 * - SG (v8M only)
9533 * The bulk of the behaviour for this instruction is implemented
9534 * in v7m_handle_execute_nsc(), which deals with the insn when
9535 * it is executed by a CPU in non-secure state from memory
9536 * which is Secure & NonSecure-Callable.
9537 * Here we only need to handle the remaining cases:
9538 * * in NS memory (including the "security extension not
9539 * implemented" case) : NOP
9540 * * in S memory but CPU already secure (clear IT bits)
9541 * We know that the attribute for the memory this insn is
9542 * in must match the current CPU state, because otherwise
9543 * get_phys_addr_pmsav8 would have generated an exception.
9544 */
9545 if (s->v8m_secure) {
9546 /* Like the IT insn, we don't need to generate any code */
9547 s->condexec_cond = 0;
9548 s->condexec_mask = 0;
9549 }
9550 } else if (insn & 0x01200000) {
ebfe27c5
PM
9551 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9552 * - load/store dual (post-indexed)
9553 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9554 * - load/store dual (literal and immediate)
9555 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9556 * - load/store dual (pre-indexed)
9557 */
910d7692
PM
9558 bool wback = extract32(insn, 21, 1);
9559
16e0d823
RH
9560 if (rn == 15 && (insn & (1 << 21))) {
9561 /* UNPREDICTABLE */
9562 goto illegal_op;
9ee6e8bb 9563 }
16e0d823
RH
9564
9565 addr = add_reg_for_lit(s, rn, 0);
9ee6e8bb 9566 offset = (insn & 0xff) * 4;
910d7692 9567 if ((insn & (1 << 23)) == 0) {
9ee6e8bb 9568 offset = -offset;
910d7692
PM
9569 }
9570
9571 if (s->v8m_stackcheck && rn == 13 && wback) {
9572 /*
9573 * Here 'addr' is the current SP; if offset is +ve we're
9574 * moving SP up, else down. It is UNKNOWN whether the limit
9575 * check triggers when SP starts below the limit and ends
9576 * up above it; check whichever of the current and final
9577 * SP is lower, so QEMU will trigger in that situation.
9578 */
9579 if ((int32_t)offset < 0) {
9580 TCGv_i32 newsp = tcg_temp_new_i32();
9581
9582 tcg_gen_addi_i32(newsp, addr, offset);
9583 gen_helper_v8m_stackcheck(cpu_env, newsp);
9584 tcg_temp_free_i32(newsp);
9585 } else {
9586 gen_helper_v8m_stackcheck(cpu_env, addr);
9587 }
9588 }
9589
9ee6e8bb 9590 if (insn & (1 << 24)) {
b0109805 9591 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9592 offset = 0;
9593 }
9594 if (insn & (1 << 20)) {
9595 /* ldrd */
e2592fad 9596 tmp = tcg_temp_new_i32();
12dcc321 9597 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9598 store_reg(s, rs, tmp);
9599 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9600 tmp = tcg_temp_new_i32();
12dcc321 9601 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9602 store_reg(s, rd, tmp);
9ee6e8bb
PB
9603 } else {
9604 /* strd */
b0109805 9605 tmp = load_reg(s, rs);
12dcc321 9606 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9607 tcg_temp_free_i32(tmp);
b0109805
PB
9608 tcg_gen_addi_i32(addr, addr, 4);
9609 tmp = load_reg(s, rd);
12dcc321 9610 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9611 tcg_temp_free_i32(tmp);
9ee6e8bb 9612 }
910d7692 9613 if (wback) {
9ee6e8bb 9614 /* Base writeback. */
b0109805
PB
9615 tcg_gen_addi_i32(addr, addr, offset - 4);
9616 store_reg(s, rn, addr);
9617 } else {
7d1b0095 9618 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9619 }
9620 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
9621 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9622 * - load/store exclusive word
5158de24 9623 * - TT (v8M only)
ebfe27c5
PM
9624 */
9625 if (rs == 15) {
5158de24
PM
9626 if (!(insn & (1 << 20)) &&
9627 arm_dc_feature(s, ARM_FEATURE_M) &&
9628 arm_dc_feature(s, ARM_FEATURE_V8)) {
9629 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
9630 * - TT (v8M only)
9631 */
9632 bool alt = insn & (1 << 7);
9633 TCGv_i32 addr, op, ttresp;
9634
9635 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
9636 /* we UNDEF for these UNPREDICTABLE cases */
9637 goto illegal_op;
9638 }
9639
9640 if (alt && !s->v8m_secure) {
9641 goto illegal_op;
9642 }
9643
9644 addr = load_reg(s, rn);
9645 op = tcg_const_i32(extract32(insn, 6, 2));
9646 ttresp = tcg_temp_new_i32();
9647 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
9648 tcg_temp_free_i32(addr);
9649 tcg_temp_free_i32(op);
9650 store_reg(s, rd, ttresp);
384c6c03 9651 break;
5158de24 9652 }
ebfe27c5
PM
9653 goto illegal_op;
9654 }
39d5492a 9655 addr = tcg_temp_local_new_i32();
98a46317 9656 load_reg_var(s, addr, rn);
426f5abc 9657 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9658 if (insn & (1 << 20)) {
426f5abc 9659 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9660 } else {
426f5abc 9661 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9662 }
39d5492a 9663 tcg_temp_free_i32(addr);
2359bf80 9664 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb 9665 /* Table Branch. */
fdbcf632 9666 addr = load_reg(s, rn);
b26eefb6 9667 tmp = load_reg(s, rm);
b0109805 9668 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9669 if (insn & (1 << 4)) {
9670 /* tbh */
b0109805 9671 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9672 tcg_temp_free_i32(tmp);
e2592fad 9673 tmp = tcg_temp_new_i32();
12dcc321 9674 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9675 } else { /* tbb */
7d1b0095 9676 tcg_temp_free_i32(tmp);
e2592fad 9677 tmp = tcg_temp_new_i32();
12dcc321 9678 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9679 }
7d1b0095 9680 tcg_temp_free_i32(addr);
b0109805 9681 tcg_gen_shli_i32(tmp, tmp, 1);
fdbcf632 9682 tcg_gen_addi_i32(tmp, tmp, read_pc(s));
b0109805 9683 store_reg(s, 15, tmp);
9ee6e8bb 9684 } else {
96c55295
PM
9685 bool is_lasr = false;
9686 bool is_ld = extract32(insn, 20, 1);
2359bf80 9687 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9688 op = (insn >> 4) & 0x3;
2359bf80
MR
9689 switch (op2) {
9690 case 0:
426f5abc 9691 goto illegal_op;
2359bf80
MR
9692 case 1:
9693 /* Load/store exclusive byte/halfword/doubleword */
9694 if (op == 2) {
9695 goto illegal_op;
9696 }
9697 ARCH(7);
9698 break;
9699 case 2:
9700 /* Load-acquire/store-release */
9701 if (op == 3) {
9702 goto illegal_op;
9703 }
9704 /* Fall through */
9705 case 3:
9706 /* Load-acquire/store-release exclusive */
9707 ARCH(8);
96c55295 9708 is_lasr = true;
2359bf80 9709 break;
426f5abc 9710 }
96c55295
PM
9711
9712 if (is_lasr && !is_ld) {
9713 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9714 }
9715
39d5492a 9716 addr = tcg_temp_local_new_i32();
98a46317 9717 load_reg_var(s, addr, rn);
2359bf80 9718 if (!(op2 & 1)) {
96c55295 9719 if (is_ld) {
2359bf80
MR
9720 tmp = tcg_temp_new_i32();
9721 switch (op) {
9722 case 0: /* ldab */
9bb6558a
PM
9723 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9724 rs | ISSIsAcqRel);
2359bf80
MR
9725 break;
9726 case 1: /* ldah */
9bb6558a
PM
9727 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9728 rs | ISSIsAcqRel);
2359bf80
MR
9729 break;
9730 case 2: /* lda */
9bb6558a
PM
9731 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9732 rs | ISSIsAcqRel);
2359bf80
MR
9733 break;
9734 default:
9735 abort();
9736 }
9737 store_reg(s, rs, tmp);
9738 } else {
9739 tmp = load_reg(s, rs);
9740 switch (op) {
9741 case 0: /* stlb */
9bb6558a
PM
9742 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9743 rs | ISSIsAcqRel);
2359bf80
MR
9744 break;
9745 case 1: /* stlh */
9bb6558a
PM
9746 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9747 rs | ISSIsAcqRel);
2359bf80
MR
9748 break;
9749 case 2: /* stl */
9bb6558a
PM
9750 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9751 rs | ISSIsAcqRel);
2359bf80
MR
9752 break;
9753 default:
9754 abort();
9755 }
9756 tcg_temp_free_i32(tmp);
9757 }
96c55295 9758 } else if (is_ld) {
426f5abc 9759 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9760 } else {
426f5abc 9761 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9762 }
39d5492a 9763 tcg_temp_free_i32(addr);
96c55295
PM
9764
9765 if (is_lasr && is_ld) {
9766 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
9767 }
9ee6e8bb
PB
9768 }
9769 } else {
9770 /* Load/store multiple, RFE, SRS. */
9771 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9772 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9773 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9774 goto illegal_op;
00115976 9775 }
9ee6e8bb
PB
9776 if (insn & (1 << 20)) {
9777 /* rfe */
b0109805
PB
9778 addr = load_reg(s, rn);
9779 if ((insn & (1 << 24)) == 0)
9780 tcg_gen_addi_i32(addr, addr, -8);
9781 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9782 tmp = tcg_temp_new_i32();
12dcc321 9783 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9784 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9785 tmp2 = tcg_temp_new_i32();
12dcc321 9786 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9787 if (insn & (1 << 21)) {
9788 /* Base writeback. */
b0109805
PB
9789 if (insn & (1 << 24)) {
9790 tcg_gen_addi_i32(addr, addr, 4);
9791 } else {
9792 tcg_gen_addi_i32(addr, addr, -4);
9793 }
9794 store_reg(s, rn, addr);
9795 } else {
7d1b0095 9796 tcg_temp_free_i32(addr);
9ee6e8bb 9797 }
b0109805 9798 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9799 } else {
9800 /* srs */
81465888
PM
9801 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9802 insn & (1 << 21));
9ee6e8bb
PB
9803 }
9804 } else {
5856d44e 9805 int i, loaded_base = 0;
39d5492a 9806 TCGv_i32 loaded_var;
7c0ed88e 9807 bool wback = extract32(insn, 21, 1);
9ee6e8bb 9808 /* Load/store multiple. */
b0109805 9809 addr = load_reg(s, rn);
9ee6e8bb
PB
9810 offset = 0;
9811 for (i = 0; i < 16; i++) {
9812 if (insn & (1 << i))
9813 offset += 4;
9814 }
7c0ed88e 9815
9ee6e8bb 9816 if (insn & (1 << 24)) {
b0109805 9817 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9818 }
9819
7c0ed88e
PM
9820 if (s->v8m_stackcheck && rn == 13 && wback) {
9821 /*
9822 * If the writeback is incrementing SP rather than
9823 * decrementing it, and the initial SP is below the
9824 * stack limit but the final written-back SP would
9825 * be above, then then we must not perform any memory
9826 * accesses, but it is IMPDEF whether we generate
9827 * an exception. We choose to do so in this case.
9828 * At this point 'addr' is the lowest address, so
9829 * either the original SP (if incrementing) or our
9830 * final SP (if decrementing), so that's what we check.
9831 */
9832 gen_helper_v8m_stackcheck(cpu_env, addr);
9833 }
9834
f764718d 9835 loaded_var = NULL;
9ee6e8bb
PB
9836 for (i = 0; i < 16; i++) {
9837 if ((insn & (1 << i)) == 0)
9838 continue;
9839 if (insn & (1 << 20)) {
9840 /* Load. */
e2592fad 9841 tmp = tcg_temp_new_i32();
12dcc321 9842 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
69be3e13 9843 if (i == rn) {
5856d44e
YO
9844 loaded_var = tmp;
9845 loaded_base = 1;
9ee6e8bb 9846 } else {
69be3e13 9847 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9848 }
9849 } else {
9850 /* Store. */
b0109805 9851 tmp = load_reg(s, i);
12dcc321 9852 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9853 tcg_temp_free_i32(tmp);
9ee6e8bb 9854 }
b0109805 9855 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9856 }
5856d44e
YO
9857 if (loaded_base) {
9858 store_reg(s, rn, loaded_var);
9859 }
7c0ed88e 9860 if (wback) {
9ee6e8bb
PB
9861 /* Base register writeback. */
9862 if (insn & (1 << 24)) {
b0109805 9863 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9864 }
9865 /* Fault if writeback register is in register list. */
9866 if (insn & (1 << rn))
9867 goto illegal_op;
b0109805
PB
9868 store_reg(s, rn, addr);
9869 } else {
7d1b0095 9870 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9871 }
9872 }
9873 }
9874 break;
2af9ab77
JB
9875 case 5:
9876
9ee6e8bb 9877 op = (insn >> 21) & 0xf;
2af9ab77 9878 if (op == 6) {
62b44f05
AR
9879 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9880 goto illegal_op;
9881 }
2af9ab77
JB
9882 /* Halfword pack. */
9883 tmp = load_reg(s, rn);
9884 tmp2 = load_reg(s, rm);
9885 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9886 if (insn & (1 << 5)) {
9887 /* pkhtb */
d1f8755f 9888 if (shift == 0) {
2af9ab77 9889 shift = 31;
d1f8755f 9890 }
2af9ab77 9891 tcg_gen_sari_i32(tmp2, tmp2, shift);
d1f8755f 9892 tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
2af9ab77
JB
9893 } else {
9894 /* pkhbt */
d1f8755f
RH
9895 tcg_gen_shli_i32(tmp2, tmp2, shift);
9896 tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
2af9ab77 9897 }
7d1b0095 9898 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9899 store_reg(s, rd, tmp);
9900 } else {
2af9ab77 9901 /* Data processing register constant shift. */
25ae32c5
RH
9902 /* All done in decodetree. Reach here for illegal ops. */
9903 goto illegal_op;
3174f8e9 9904 }
9ee6e8bb
PB
9905 break;
9906 case 13: /* Misc data processing. */
9907 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9908 if (op < 4 && (insn & 0xf000) != 0xf000)
9909 goto illegal_op;
9910 switch (op) {
5be2c123
RH
9911 case 0: /* Register controlled shift, in decodetree */
9912 goto illegal_op;
9ee6e8bb 9913 case 1: /* Sign/zero extend. */
62b44f05
AR
9914 op = (insn >> 20) & 7;
9915 switch (op) {
9916 case 0: /* SXTAH, SXTH */
9917 case 1: /* UXTAH, UXTH */
9918 case 4: /* SXTAB, SXTB */
9919 case 5: /* UXTAB, UXTB */
9920 break;
9921 case 2: /* SXTAB16, SXTB16 */
9922 case 3: /* UXTAB16, UXTB16 */
9923 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9924 goto illegal_op;
9925 }
9926 break;
9927 default:
9928 goto illegal_op;
9929 }
9930 if (rn != 15) {
9931 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9932 goto illegal_op;
9933 }
9934 }
5e3f878a 9935 tmp = load_reg(s, rm);
9ee6e8bb 9936 shift = (insn >> 4) & 3;
1301f322 9937 /* ??? In many cases it's not necessary to do a
9ee6e8bb 9938 rotate, a shift is sufficient. */
464eaa95 9939 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9940 op = (insn >> 20) & 7;
9941 switch (op) {
5e3f878a
PB
9942 case 0: gen_sxth(tmp); break;
9943 case 1: gen_uxth(tmp); break;
9944 case 2: gen_sxtb16(tmp); break;
9945 case 3: gen_uxtb16(tmp); break;
9946 case 4: gen_sxtb(tmp); break;
9947 case 5: gen_uxtb(tmp); break;
62b44f05
AR
9948 default:
9949 g_assert_not_reached();
9ee6e8bb
PB
9950 }
9951 if (rn != 15) {
5e3f878a 9952 tmp2 = load_reg(s, rn);
9ee6e8bb 9953 if ((op >> 1) == 1) {
5e3f878a 9954 gen_add16(tmp, tmp2);
9ee6e8bb 9955 } else {
5e3f878a 9956 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9957 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9958 }
9959 }
5e3f878a 9960 store_reg(s, rd, tmp);
9ee6e8bb
PB
9961 break;
9962 case 2: /* SIMD add/subtract. */
62b44f05
AR
9963 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9964 goto illegal_op;
9965 }
9ee6e8bb
PB
9966 op = (insn >> 20) & 7;
9967 shift = (insn >> 4) & 7;
9968 if ((op & 3) == 3 || (shift & 3) == 3)
9969 goto illegal_op;
6ddbc6e4
PB
9970 tmp = load_reg(s, rn);
9971 tmp2 = load_reg(s, rm);
9972 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9973 tcg_temp_free_i32(tmp2);
6ddbc6e4 9974 store_reg(s, rd, tmp);
9ee6e8bb
PB
9975 break;
9976 case 3: /* Other data processing. */
9977 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9978 if (op < 4) {
9979 /* Saturating add/subtract. */
62b44f05
AR
9980 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9981 goto illegal_op;
9982 }
d9ba4830
PB
9983 tmp = load_reg(s, rn);
9984 tmp2 = load_reg(s, rm);
9ee6e8bb 9985 if (op & 1)
640581a0 9986 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp);
4809c612 9987 if (op & 2)
9ef39277 9988 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9989 else
9ef39277 9990 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9991 tcg_temp_free_i32(tmp2);
9ee6e8bb 9992 } else {
62b44f05
AR
9993 switch (op) {
9994 case 0x0a: /* rbit */
9995 case 0x08: /* rev */
9996 case 0x09: /* rev16 */
9997 case 0x0b: /* revsh */
9998 case 0x18: /* clz */
9999 break;
10000 case 0x10: /* sel */
10001 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10002 goto illegal_op;
10003 }
10004 break;
10005 case 0x20: /* crc32/crc32c */
10006 case 0x21:
10007 case 0x22:
10008 case 0x28:
10009 case 0x29:
10010 case 0x2a:
962fcbf2 10011 if (!dc_isar_feature(aa32_crc32, s)) {
62b44f05
AR
10012 goto illegal_op;
10013 }
10014 break;
10015 default:
10016 goto illegal_op;
10017 }
d9ba4830 10018 tmp = load_reg(s, rn);
9ee6e8bb
PB
10019 switch (op) {
10020 case 0x0a: /* rbit */
d9ba4830 10021 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10022 break;
10023 case 0x08: /* rev */
66896cb8 10024 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10025 break;
10026 case 0x09: /* rev16 */
d9ba4830 10027 gen_rev16(tmp);
9ee6e8bb
PB
10028 break;
10029 case 0x0b: /* revsh */
d9ba4830 10030 gen_revsh(tmp);
9ee6e8bb
PB
10031 break;
10032 case 0x10: /* sel */
d9ba4830 10033 tmp2 = load_reg(s, rm);
7d1b0095 10034 tmp3 = tcg_temp_new_i32();
0ecb72a5 10035 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10036 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10037 tcg_temp_free_i32(tmp3);
10038 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10039 break;
10040 case 0x18: /* clz */
7539a012 10041 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10042 break;
eb0ecd5a
WN
10043 case 0x20:
10044 case 0x21:
10045 case 0x22:
10046 case 0x28:
10047 case 0x29:
10048 case 0x2a:
10049 {
10050 /* crc32/crc32c */
10051 uint32_t sz = op & 0x3;
10052 uint32_t c = op & 0x8;
10053
eb0ecd5a 10054 tmp2 = load_reg(s, rm);
aa633469
PM
10055 if (sz == 0) {
10056 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10057 } else if (sz == 1) {
10058 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10059 }
eb0ecd5a
WN
10060 tmp3 = tcg_const_i32(1 << sz);
10061 if (c) {
10062 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10063 } else {
10064 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10065 }
10066 tcg_temp_free_i32(tmp2);
10067 tcg_temp_free_i32(tmp3);
10068 break;
10069 }
9ee6e8bb 10070 default:
62b44f05 10071 g_assert_not_reached();
9ee6e8bb
PB
10072 }
10073 }
d9ba4830 10074 store_reg(s, rd, tmp);
9ee6e8bb
PB
10075 break;
10076 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10077 switch ((insn >> 20) & 7) {
10078 case 0: /* 32 x 32 -> 32 */
10079 case 7: /* Unsigned sum of absolute differences. */
10080 break;
10081 case 1: /* 16 x 16 -> 32 */
10082 case 2: /* Dual multiply add. */
10083 case 3: /* 32 * 16 -> 32msb */
10084 case 4: /* Dual multiply subtract. */
10085 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10086 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10087 goto illegal_op;
10088 }
10089 break;
10090 }
9ee6e8bb 10091 op = (insn >> 4) & 0xf;
d9ba4830
PB
10092 tmp = load_reg(s, rn);
10093 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10094 switch ((insn >> 20) & 7) {
10095 case 0: /* 32 x 32 -> 32 */
d9ba4830 10096 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10097 tcg_temp_free_i32(tmp2);
9ee6e8bb 10098 if (rs != 15) {
d9ba4830 10099 tmp2 = load_reg(s, rs);
9ee6e8bb 10100 if (op)
d9ba4830 10101 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10102 else
d9ba4830 10103 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10104 tcg_temp_free_i32(tmp2);
9ee6e8bb 10105 }
9ee6e8bb
PB
10106 break;
10107 case 1: /* 16 x 16 -> 32 */
d9ba4830 10108 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10109 tcg_temp_free_i32(tmp2);
9ee6e8bb 10110 if (rs != 15) {
d9ba4830 10111 tmp2 = load_reg(s, rs);
9ef39277 10112 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10113 tcg_temp_free_i32(tmp2);
9ee6e8bb 10114 }
9ee6e8bb
PB
10115 break;
10116 case 2: /* Dual multiply add. */
10117 case 4: /* Dual multiply subtract. */
10118 if (op)
d9ba4830
PB
10119 gen_swap_half(tmp2);
10120 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10121 if (insn & (1 << 22)) {
e1d177b9 10122 /* This subtraction cannot overflow. */
d9ba4830 10123 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10124 } else {
e1d177b9
PM
10125 /* This addition cannot overflow 32 bits;
10126 * however it may overflow considered as a signed
10127 * operation, in which case we must set the Q flag.
10128 */
9ef39277 10129 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10130 }
7d1b0095 10131 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10132 if (rs != 15)
10133 {
d9ba4830 10134 tmp2 = load_reg(s, rs);
9ef39277 10135 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10136 tcg_temp_free_i32(tmp2);
9ee6e8bb 10137 }
9ee6e8bb
PB
10138 break;
10139 case 3: /* 32 * 16 -> 32msb */
10140 if (op)
d9ba4830 10141 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10142 else
d9ba4830 10143 gen_sxth(tmp2);
a7812ae4
PB
10144 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10145 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10146 tmp = tcg_temp_new_i32();
ecc7b3aa 10147 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10148 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10149 if (rs != 15)
10150 {
d9ba4830 10151 tmp2 = load_reg(s, rs);
9ef39277 10152 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10153 tcg_temp_free_i32(tmp2);
9ee6e8bb 10154 }
9ee6e8bb 10155 break;
838fa72d 10156 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
5f8cd06e 10157 tcg_gen_muls2_i32(tmp2, tmp, tmp, tmp2);
9ee6e8bb 10158 if (rs != 15) {
5f8cd06e 10159 tmp3 = load_reg(s, rs);
838fa72d 10160 if (insn & (1 << 20)) {
5f8cd06e 10161 tcg_gen_add_i32(tmp, tmp, tmp3);
99c475ab 10162 } else {
e0a0c832
RH
10163 /*
10164 * For SMMLS, we need a 64-bit subtract.
10165 * Borrow caused by a non-zero multiplicand lowpart,
10166 * and the correct result lowpart for rounding.
10167 */
10168 TCGv_i32 zero = tcg_const_i32(0);
10169 tcg_gen_sub2_i32(tmp2, tmp, zero, tmp3, tmp2, tmp);
10170 tcg_temp_free_i32(zero);
99c475ab 10171 }
5f8cd06e 10172 tcg_temp_free_i32(tmp3);
2c0262af 10173 }
838fa72d 10174 if (insn & (1 << 4)) {
5f8cd06e
RH
10175 /*
10176 * Adding 0x80000000 to the 64-bit quantity
10177 * means that we have carry in to the high
10178 * word when the low word has the high bit set.
10179 */
10180 tcg_gen_shri_i32(tmp2, tmp2, 31);
10181 tcg_gen_add_i32(tmp, tmp, tmp2);
838fa72d 10182 }
5f8cd06e 10183 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10184 break;
10185 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10186 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10187 tcg_temp_free_i32(tmp2);
9ee6e8bb 10188 if (rs != 15) {
d9ba4830
PB
10189 tmp2 = load_reg(s, rs);
10190 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10191 tcg_temp_free_i32(tmp2);
5fd46862 10192 }
9ee6e8bb 10193 break;
2c0262af 10194 }
d9ba4830 10195 store_reg(s, rd, tmp);
2c0262af 10196 break;
9ee6e8bb
PB
10197 case 6: case 7: /* 64-bit multiply, Divide. */
10198 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10199 tmp = load_reg(s, rn);
10200 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10201 if ((op & 0x50) == 0x10) {
10202 /* sdiv, udiv */
7e0cf8b4 10203 if (!dc_isar_feature(thumb_div, s)) {
9ee6e8bb 10204 goto illegal_op;
47789990 10205 }
9ee6e8bb 10206 if (op & 0x20)
5e3f878a 10207 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10208 else
5e3f878a 10209 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10210 tcg_temp_free_i32(tmp2);
5e3f878a 10211 store_reg(s, rd, tmp);
9ee6e8bb
PB
10212 } else if ((op & 0xe) == 0xc) {
10213 /* Dual multiply accumulate long. */
62b44f05
AR
10214 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10215 tcg_temp_free_i32(tmp);
10216 tcg_temp_free_i32(tmp2);
10217 goto illegal_op;
10218 }
9ee6e8bb 10219 if (op & 1)
5e3f878a
PB
10220 gen_swap_half(tmp2);
10221 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10222 if (op & 0x10) {
5e3f878a 10223 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10224 } else {
5e3f878a 10225 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10226 }
7d1b0095 10227 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10228 /* BUGFIX */
10229 tmp64 = tcg_temp_new_i64();
10230 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10231 tcg_temp_free_i32(tmp);
a7812ae4
PB
10232 gen_addq(s, tmp64, rs, rd);
10233 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10234 tcg_temp_free_i64(tmp64);
2c0262af 10235 } else {
9ee6e8bb
PB
10236 if (op & 0x20) {
10237 /* Unsigned 64-bit multiply */
a7812ae4 10238 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10239 } else {
9ee6e8bb
PB
10240 if (op & 8) {
10241 /* smlalxy */
62b44f05
AR
10242 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10243 tcg_temp_free_i32(tmp2);
10244 tcg_temp_free_i32(tmp);
10245 goto illegal_op;
10246 }
5e3f878a 10247 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10248 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10249 tmp64 = tcg_temp_new_i64();
10250 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10251 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10252 } else {
10253 /* Signed 64-bit multiply */
a7812ae4 10254 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10255 }
b5ff1b31 10256 }
9ee6e8bb
PB
10257 if (op & 4) {
10258 /* umaal */
62b44f05
AR
10259 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10260 tcg_temp_free_i64(tmp64);
10261 goto illegal_op;
10262 }
a7812ae4
PB
10263 gen_addq_lo(s, tmp64, rs);
10264 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10265 } else if (op & 0x40) {
10266 /* 64-bit accumulate. */
a7812ae4 10267 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10268 }
a7812ae4 10269 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10270 tcg_temp_free_i64(tmp64);
5fd46862 10271 }
2c0262af 10272 break;
9ee6e8bb
PB
10273 }
10274 break;
10275 case 6: case 7: case 14: case 15:
10276 /* Coprocessor. */
7517748e 10277 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8859ba3c
PM
10278 /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
10279 if (extract32(insn, 24, 2) == 3) {
10280 goto illegal_op; /* op0 = 0b11 : unallocated */
10281 }
10282
10283 /*
10284 * Decode VLLDM and VLSTM first: these are nonstandard because:
10285 * * if there is no FPU then these insns must NOP in
10286 * Secure state and UNDEF in Nonsecure state
10287 * * if there is an FPU then these insns do not have
10288 * the usual behaviour that disas_vfp_insn() provides of
10289 * being controlled by CPACR/NSACR enable bits or the
10290 * lazy-stacking logic.
7517748e 10291 */
b1e5336a
PM
10292 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10293 (insn & 0xffa00f00) == 0xec200a00) {
10294 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
10295 * - VLLDM, VLSTM
10296 * We choose to UNDEF if the RAZ bits are non-zero.
10297 */
10298 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
10299 goto illegal_op;
10300 }
019076b0
PM
10301
10302 if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
10303 TCGv_i32 fptr = load_reg(s, rn);
10304
10305 if (extract32(insn, 20, 1)) {
956fe143 10306 gen_helper_v7m_vlldm(cpu_env, fptr);
019076b0
PM
10307 } else {
10308 gen_helper_v7m_vlstm(cpu_env, fptr);
10309 }
10310 tcg_temp_free_i32(fptr);
10311
10312 /* End the TB, because we have updated FP control bits */
10313 s->base.is_jmp = DISAS_UPDATE;
10314 }
b1e5336a
PM
10315 break;
10316 }
8859ba3c
PM
10317 if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
10318 ((insn >> 8) & 0xe) == 10) {
10319 /* FP, and the CPU supports it */
10320 if (disas_vfp_insn(s, insn)) {
10321 goto illegal_op;
10322 }
10323 break;
10324 }
10325
b1e5336a 10326 /* All other insns: NOCP */
a767fac8 10327 gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
7517748e
PM
10328 default_exception_el(s));
10329 break;
10330 }
0052087e
RH
10331 if ((insn & 0xfe000a00) == 0xfc000800
10332 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10333 /* The Thumb2 and ARM encodings are identical. */
10334 if (disas_neon_insn_3same_ext(s, insn)) {
10335 goto illegal_op;
10336 }
10337 } else if ((insn & 0xff000a00) == 0xfe000800
10338 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10339 /* The Thumb2 and ARM encodings are identical. */
10340 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
10341 goto illegal_op;
10342 }
10343 } else if (((insn >> 24) & 3) == 3) {
9ee6e8bb 10344 /* Translate into the equivalent ARM encoding. */
f06053e3 10345 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10346 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10347 goto illegal_op;
7dcc1f89 10348 }
6a57f3eb 10349 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10350 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10351 goto illegal_op;
10352 }
9ee6e8bb
PB
10353 } else {
10354 if (insn & (1 << 28))
10355 goto illegal_op;
7dcc1f89 10356 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10357 goto illegal_op;
7dcc1f89 10358 }
9ee6e8bb
PB
10359 }
10360 break;
10361 case 8: case 9: case 10: case 11:
10362 if (insn & (1 << 15)) {
10363 /* Branches, misc control. */
10364 if (insn & 0x5000) {
10365 /* Unconditional branch. */
10366 /* signextend(hw1[10:0]) -> offset[:12]. */
10367 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10368 /* hw1[10:0] -> offset[11:1]. */
10369 offset |= (insn & 0x7ff) << 1;
10370 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10371 offset[24:22] already have the same value because of the
10372 sign extension above. */
10373 offset ^= ((~insn) & (1 << 13)) << 10;
10374 offset ^= ((~insn) & (1 << 11)) << 11;
10375
9ee6e8bb
PB
10376 if (insn & (1 << 14)) {
10377 /* Branch and link. */
a0415916 10378 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
b5ff1b31 10379 }
3b46e624 10380
fdbcf632 10381 offset += read_pc(s);
9ee6e8bb
PB
10382 if (insn & (1 << 12)) {
10383 /* b/bl */
b0109805 10384 gen_jmp(s, offset);
9ee6e8bb
PB
10385 } else {
10386 /* blx */
b0109805 10387 offset &= ~(uint32_t)2;
be5e7a76 10388 /* thumb2 bx, no need to check */
b0109805 10389 gen_bx_im(s, offset);
2c0262af 10390 }
9ee6e8bb
PB
10391 } else if (((insn >> 23) & 7) == 7) {
10392 /* Misc control */
10393 if (insn & (1 << 13))
10394 goto illegal_op;
10395
10396 if (insn & (1 << 26)) {
001b3cab
PM
10397 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10398 goto illegal_op;
10399 }
37e6456e
PM
10400 if (!(insn & (1 << 20))) {
10401 /* Hypervisor call (v7) */
10402 int imm16 = extract32(insn, 16, 4) << 12
10403 | extract32(insn, 0, 12);
10404 ARCH(7);
10405 if (IS_USER(s)) {
10406 goto illegal_op;
10407 }
10408 gen_hvc(s, imm16);
10409 } else {
10410 /* Secure monitor call (v6+) */
10411 ARCH(6K);
10412 if (IS_USER(s)) {
10413 goto illegal_op;
10414 }
10415 gen_smc(s);
10416 }
2c0262af 10417 } else {
9ee6e8bb
PB
10418 op = (insn >> 20) & 7;
10419 switch (op) {
10420 case 0: /* msr cpsr. */
b53d8923 10421 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 10422 tmp = load_reg(s, rn);
b28b3377
PM
10423 /* the constant is the mask and SYSm fields */
10424 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 10425 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10426 tcg_temp_free_i32(addr);
7d1b0095 10427 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10428 gen_lookup_tb(s);
10429 break;
10430 }
10431 /* fall through */
10432 case 1: /* msr spsr. */
b53d8923 10433 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10434 goto illegal_op;
b53d8923 10435 }
8bfd0550
PM
10436
10437 if (extract32(insn, 5, 1)) {
10438 /* MSR (banked) */
10439 int sysm = extract32(insn, 8, 4) |
10440 (extract32(insn, 4, 1) << 4);
10441 int r = op & 1;
10442
10443 gen_msr_banked(s, r, sysm, rm);
10444 break;
10445 }
10446
10447 /* MSR (for PSRs) */
2fbac54b
FN
10448 tmp = load_reg(s, rn);
10449 if (gen_set_psr(s,
7dcc1f89 10450 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10451 op == 1, tmp))
9ee6e8bb
PB
10452 goto illegal_op;
10453 break;
10454 case 2: /* cps, nop-hint. */
10455 if (((insn >> 8) & 7) == 0) {
10456 gen_nop_hint(s, insn & 0xff);
10457 }
10458 /* Implemented as NOP in user mode. */
10459 if (IS_USER(s))
10460 break;
10461 offset = 0;
10462 imm = 0;
10463 if (insn & (1 << 10)) {
10464 if (insn & (1 << 7))
10465 offset |= CPSR_A;
10466 if (insn & (1 << 6))
10467 offset |= CPSR_I;
10468 if (insn & (1 << 5))
10469 offset |= CPSR_F;
10470 if (insn & (1 << 9))
10471 imm = CPSR_A | CPSR_I | CPSR_F;
10472 }
10473 if (insn & (1 << 8)) {
10474 offset |= 0x1f;
10475 imm |= (insn & 0x1f);
10476 }
10477 if (offset) {
2fbac54b 10478 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10479 }
10480 break;
10481 case 3: /* Special control operations. */
14120108 10482 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
8297cb13 10483 !arm_dc_feature(s, ARM_FEATURE_M)) {
14120108
JS
10484 goto illegal_op;
10485 }
9ee6e8bb
PB
10486 op = (insn >> 4) & 0xf;
10487 switch (op) {
10488 case 2: /* clrex */
426f5abc 10489 gen_clrex(s);
9ee6e8bb
PB
10490 break;
10491 case 4: /* dsb */
10492 case 5: /* dmb */
61e4c432 10493 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10494 break;
6df99dec
SS
10495 case 6: /* isb */
10496 /* We need to break the TB after this insn
10497 * to execute self-modifying code correctly
10498 * and also to take any pending interrupts
10499 * immediately.
10500 */
a0415916 10501 gen_goto_tb(s, 0, s->base.pc_next);
6df99dec 10502 break;
9888bd1e
RH
10503 case 7: /* sb */
10504 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
10505 goto illegal_op;
10506 }
10507 /*
10508 * TODO: There is no speculation barrier opcode
10509 * for TCG; MB and end the TB instead.
10510 */
10511 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
a0415916 10512 gen_goto_tb(s, 0, s->base.pc_next);
9888bd1e 10513 break;
9ee6e8bb
PB
10514 default:
10515 goto illegal_op;
10516 }
10517 break;
10518 case 4: /* bxj */
9d7c59c8
PM
10519 /* Trivial implementation equivalent to bx.
10520 * This instruction doesn't exist at all for M-profile.
10521 */
10522 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10523 goto illegal_op;
10524 }
d9ba4830
PB
10525 tmp = load_reg(s, rn);
10526 gen_bx(s, tmp);
9ee6e8bb
PB
10527 break;
10528 case 5: /* Exception return. */
b8b45b68
RV
10529 if (IS_USER(s)) {
10530 goto illegal_op;
10531 }
10532 if (rn != 14 || rd != 15) {
10533 goto illegal_op;
10534 }
55c544ed
PM
10535 if (s->current_el == 2) {
10536 /* ERET from Hyp uses ELR_Hyp, not LR */
10537 if (insn & 0xff) {
10538 goto illegal_op;
10539 }
10540 tmp = load_cpu_field(elr_el[2]);
10541 } else {
10542 tmp = load_reg(s, rn);
10543 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10544 }
b8b45b68
RV
10545 gen_exception_return(s, tmp);
10546 break;
8bfd0550 10547 case 6: /* MRS */
43ac6574
PM
10548 if (extract32(insn, 5, 1) &&
10549 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10550 /* MRS (banked) */
10551 int sysm = extract32(insn, 16, 4) |
10552 (extract32(insn, 4, 1) << 4);
10553
10554 gen_mrs_banked(s, 0, sysm, rd);
10555 break;
10556 }
10557
3d54026f
PM
10558 if (extract32(insn, 16, 4) != 0xf) {
10559 goto illegal_op;
10560 }
10561 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10562 extract32(insn, 0, 8) != 0) {
10563 goto illegal_op;
10564 }
10565
8bfd0550 10566 /* mrs cpsr */
7d1b0095 10567 tmp = tcg_temp_new_i32();
b53d8923 10568 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10569 addr = tcg_const_i32(insn & 0xff);
10570 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10571 tcg_temp_free_i32(addr);
9ee6e8bb 10572 } else {
9ef39277 10573 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10574 }
8984bd2e 10575 store_reg(s, rd, tmp);
9ee6e8bb 10576 break;
8bfd0550 10577 case 7: /* MRS */
43ac6574
PM
10578 if (extract32(insn, 5, 1) &&
10579 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10580 /* MRS (banked) */
10581 int sysm = extract32(insn, 16, 4) |
10582 (extract32(insn, 4, 1) << 4);
10583
10584 gen_mrs_banked(s, 1, sysm, rd);
10585 break;
10586 }
10587
10588 /* mrs spsr. */
9ee6e8bb 10589 /* Not accessible in user mode. */
b53d8923 10590 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10591 goto illegal_op;
b53d8923 10592 }
3d54026f
PM
10593
10594 if (extract32(insn, 16, 4) != 0xf ||
10595 extract32(insn, 0, 8) != 0) {
10596 goto illegal_op;
10597 }
10598
d9ba4830
PB
10599 tmp = load_cpu_field(spsr);
10600 store_reg(s, rd, tmp);
9ee6e8bb 10601 break;
2c0262af
FB
10602 }
10603 }
9ee6e8bb
PB
10604 } else {
10605 /* Conditional branch. */
10606 op = (insn >> 22) & 0xf;
10607 /* Generate a conditional jump to next instruction. */
c2d9644e 10608 arm_skip_unless(s, op);
9ee6e8bb
PB
10609
10610 /* offset[11:1] = insn[10:0] */
10611 offset = (insn & 0x7ff) << 1;
10612 /* offset[17:12] = insn[21:16]. */
10613 offset |= (insn & 0x003f0000) >> 4;
10614 /* offset[31:20] = insn[26]. */
10615 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10616 /* offset[18] = insn[13]. */
10617 offset |= (insn & (1 << 13)) << 5;
10618 /* offset[19] = insn[11]. */
10619 offset |= (insn & (1 << 11)) << 8;
10620
10621 /* jump to the offset */
fdbcf632 10622 gen_jmp(s, read_pc(s) + offset);
9ee6e8bb
PB
10623 }
10624 } else {
55203189
PM
10625 /*
10626 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
10627 * - Data-processing (modified immediate, plain binary immediate)
10628 */
9ee6e8bb 10629 if (insn & (1 << 25)) {
55203189
PM
10630 /*
10631 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
10632 * - Data-processing (plain binary immediate)
10633 */
9ee6e8bb
PB
10634 if (insn & (1 << 24)) {
10635 if (insn & (1 << 20))
10636 goto illegal_op;
10637 /* Bitfield/Saturate. */
10638 op = (insn >> 21) & 7;
10639 imm = insn & 0x1f;
10640 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10641 if (rn == 15) {
7d1b0095 10642 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10643 tcg_gen_movi_i32(tmp, 0);
10644 } else {
10645 tmp = load_reg(s, rn);
10646 }
9ee6e8bb
PB
10647 switch (op) {
10648 case 2: /* Signed bitfield extract. */
10649 imm++;
10650 if (shift + imm > 32)
10651 goto illegal_op;
59a71b4c
RH
10652 if (imm < 32) {
10653 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10654 }
9ee6e8bb
PB
10655 break;
10656 case 6: /* Unsigned bitfield extract. */
10657 imm++;
10658 if (shift + imm > 32)
10659 goto illegal_op;
59a71b4c
RH
10660 if (imm < 32) {
10661 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10662 }
9ee6e8bb
PB
10663 break;
10664 case 3: /* Bitfield insert/clear. */
10665 if (imm < shift)
10666 goto illegal_op;
10667 imm = imm + 1 - shift;
10668 if (imm != 32) {
6ddbc6e4 10669 tmp2 = load_reg(s, rd);
d593c48e 10670 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10671 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10672 }
10673 break;
10674 case 7:
10675 goto illegal_op;
10676 default: /* Saturate. */
464eaa95
RH
10677 if (op & 1) {
10678 tcg_gen_sari_i32(tmp, tmp, shift);
10679 } else {
10680 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10681 }
6ddbc6e4 10682 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10683 if (op & 4) {
10684 /* Unsigned. */
62b44f05
AR
10685 if ((op & 1) && shift == 0) {
10686 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10687 tcg_temp_free_i32(tmp);
10688 tcg_temp_free_i32(tmp2);
10689 goto illegal_op;
10690 }
9ef39277 10691 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10692 } else {
9ef39277 10693 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10694 }
2c0262af 10695 } else {
9ee6e8bb 10696 /* Signed. */
62b44f05
AR
10697 if ((op & 1) && shift == 0) {
10698 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10699 tcg_temp_free_i32(tmp);
10700 tcg_temp_free_i32(tmp2);
10701 goto illegal_op;
10702 }
9ef39277 10703 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10704 } else {
9ef39277 10705 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10706 }
2c0262af 10707 }
b75263d6 10708 tcg_temp_free_i32(tmp2);
9ee6e8bb 10709 break;
2c0262af 10710 }
6ddbc6e4 10711 store_reg(s, rd, tmp);
9ee6e8bb
PB
10712 } else {
10713 imm = ((insn & 0x04000000) >> 15)
10714 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10715 if (insn & (1 << 22)) {
10716 /* 16-bit immediate. */
10717 imm |= (insn >> 4) & 0xf000;
10718 if (insn & (1 << 23)) {
10719 /* movt */
5e3f878a 10720 tmp = load_reg(s, rd);
86831435 10721 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10722 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10723 } else {
9ee6e8bb 10724 /* movw */
7d1b0095 10725 tmp = tcg_temp_new_i32();
5e3f878a 10726 tcg_gen_movi_i32(tmp, imm);
2c0262af 10727 }
55203189 10728 store_reg(s, rd, tmp);
2c0262af 10729 } else {
9ee6e8bb 10730 /* Add/sub 12-bit immediate. */
16e0d823
RH
10731 if (insn & (1 << 23)) {
10732 imm = -imm;
10733 }
10734 tmp = add_reg_for_lit(s, rn, imm);
10735 if (rn == 13 && rd == 13) {
10736 /* ADD SP, SP, imm or SUB SP, SP, imm */
10737 store_sp_checked(s, tmp);
2c0262af 10738 } else {
16e0d823 10739 store_reg(s, rd, tmp);
2c0262af 10740 }
9ee6e8bb 10741 }
191abaa2 10742 }
9ee6e8bb 10743 } else {
581c6ebd
RH
10744 /* Data-processing (modified immediate) */
10745 /* All done in decodetree. Reach here for illegal ops. */
10746 goto illegal_op;
2c0262af 10747 }
9ee6e8bb
PB
10748 }
10749 break;
10750 case 12: /* Load/store single data item. */
10751 {
10752 int postinc = 0;
10753 int writeback = 0;
a99caa48 10754 int memidx;
9bb6558a
PM
10755 ISSInfo issinfo;
10756
9ee6e8bb 10757 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10758 if (disas_neon_ls_insn(s, insn)) {
c1713132 10759 goto illegal_op;
7dcc1f89 10760 }
9ee6e8bb
PB
10761 break;
10762 }
a2fdc890
PM
10763 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10764 if (rs == 15) {
10765 if (!(insn & (1 << 20))) {
10766 goto illegal_op;
10767 }
10768 if (op != 2) {
10769 /* Byte or halfword load space with dest == r15 : memory hints.
10770 * Catch them early so we don't emit pointless addressing code.
10771 * This space is a mix of:
10772 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10773 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10774 * cores)
10775 * unallocated hints, which must be treated as NOPs
10776 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10777 * which is easiest for the decoding logic
10778 * Some space which must UNDEF
10779 */
10780 int op1 = (insn >> 23) & 3;
10781 int op2 = (insn >> 6) & 0x3f;
10782 if (op & 2) {
10783 goto illegal_op;
10784 }
10785 if (rn == 15) {
02afbf64
PM
10786 /* UNPREDICTABLE, unallocated hint or
10787 * PLD/PLDW/PLI (literal)
10788 */
2eea841c 10789 return;
a2fdc890
PM
10790 }
10791 if (op1 & 1) {
2eea841c 10792 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10793 }
10794 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 10795 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10796 }
10797 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 10798 goto illegal_op;
a2fdc890
PM
10799 }
10800 }
a99caa48 10801 memidx = get_mem_index(s);
16e0d823
RH
10802 imm = insn & 0xfff;
10803 if (insn & (1 << 23)) {
10804 /* PC relative or Positive offset. */
10805 addr = add_reg_for_lit(s, rn, imm);
10806 } else if (rn == 15) {
10807 /* PC relative with negative offset. */
10808 addr = add_reg_for_lit(s, rn, -imm);
9ee6e8bb 10809 } else {
b0109805 10810 addr = load_reg(s, rn);
16e0d823
RH
10811 imm = insn & 0xff;
10812 switch ((insn >> 8) & 0xf) {
10813 case 0x0: /* Shifted Register. */
10814 shift = (insn >> 4) & 0xf;
10815 if (shift > 3) {
2a0308c5 10816 tcg_temp_free_i32(addr);
b7bcbe95 10817 goto illegal_op;
9ee6e8bb 10818 }
16e0d823 10819 tmp = load_reg(s, rm);
464eaa95 10820 tcg_gen_shli_i32(tmp, tmp, shift);
16e0d823
RH
10821 tcg_gen_add_i32(addr, addr, tmp);
10822 tcg_temp_free_i32(tmp);
10823 break;
10824 case 0xc: /* Negative offset. */
10825 tcg_gen_addi_i32(addr, addr, -imm);
10826 break;
10827 case 0xe: /* User privilege. */
10828 tcg_gen_addi_i32(addr, addr, imm);
10829 memidx = get_a32_user_mem_index(s);
10830 break;
10831 case 0x9: /* Post-decrement. */
10832 imm = -imm;
10833 /* Fall through. */
10834 case 0xb: /* Post-increment. */
10835 postinc = 1;
10836 writeback = 1;
10837 break;
10838 case 0xd: /* Pre-decrement. */
10839 imm = -imm;
10840 /* Fall through. */
10841 case 0xf: /* Pre-increment. */
10842 writeback = 1;
10843 break;
10844 default:
10845 tcg_temp_free_i32(addr);
10846 goto illegal_op;
9ee6e8bb
PB
10847 }
10848 }
9bb6558a
PM
10849
10850 issinfo = writeback ? ISSInvalid : rs;
10851
0bc003ba
PM
10852 if (s->v8m_stackcheck && rn == 13 && writeback) {
10853 /*
10854 * Stackcheck. Here we know 'addr' is the current SP;
10855 * if imm is +ve we're moving SP up, else down. It is
10856 * UNKNOWN whether the limit check triggers when SP starts
10857 * below the limit and ends up above it; we chose to do so.
10858 */
10859 if ((int32_t)imm < 0) {
10860 TCGv_i32 newsp = tcg_temp_new_i32();
10861
10862 tcg_gen_addi_i32(newsp, addr, imm);
10863 gen_helper_v8m_stackcheck(cpu_env, newsp);
10864 tcg_temp_free_i32(newsp);
10865 } else {
10866 gen_helper_v8m_stackcheck(cpu_env, addr);
10867 }
10868 }
10869
10870 if (writeback && !postinc) {
10871 tcg_gen_addi_i32(addr, addr, imm);
10872 }
10873
9ee6e8bb
PB
10874 if (insn & (1 << 20)) {
10875 /* Load. */
5a839c0d 10876 tmp = tcg_temp_new_i32();
a2fdc890 10877 switch (op) {
5a839c0d 10878 case 0:
9bb6558a 10879 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10880 break;
10881 case 4:
9bb6558a 10882 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10883 break;
10884 case 1:
9bb6558a 10885 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10886 break;
10887 case 5:
9bb6558a 10888 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10889 break;
10890 case 2:
9bb6558a 10891 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10892 break;
2a0308c5 10893 default:
5a839c0d 10894 tcg_temp_free_i32(tmp);
2a0308c5
PM
10895 tcg_temp_free_i32(addr);
10896 goto illegal_op;
a2fdc890 10897 }
69be3e13 10898 store_reg_from_load(s, rs, tmp);
9ee6e8bb
PB
10899 } else {
10900 /* Store. */
b0109805 10901 tmp = load_reg(s, rs);
9ee6e8bb 10902 switch (op) {
5a839c0d 10903 case 0:
9bb6558a 10904 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10905 break;
10906 case 1:
9bb6558a 10907 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10908 break;
10909 case 2:
9bb6558a 10910 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10911 break;
2a0308c5 10912 default:
5a839c0d 10913 tcg_temp_free_i32(tmp);
2a0308c5
PM
10914 tcg_temp_free_i32(addr);
10915 goto illegal_op;
b7bcbe95 10916 }
5a839c0d 10917 tcg_temp_free_i32(tmp);
2c0262af 10918 }
9ee6e8bb 10919 if (postinc)
b0109805
PB
10920 tcg_gen_addi_i32(addr, addr, imm);
10921 if (writeback) {
10922 store_reg(s, rn, addr);
10923 } else {
7d1b0095 10924 tcg_temp_free_i32(addr);
b0109805 10925 }
9ee6e8bb
PB
10926 }
10927 break;
10928 default:
10929 goto illegal_op;
2c0262af 10930 }
2eea841c 10931 return;
9ee6e8bb 10932illegal_op:
1ce21ba1 10933 unallocated_encoding(s);
2c0262af
FB
10934}
10935
296e5a0a 10936static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 10937{
296e5a0a 10938 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
10939 int32_t offset;
10940 int i;
39d5492a
PM
10941 TCGv_i32 tmp;
10942 TCGv_i32 tmp2;
10943 TCGv_i32 addr;
99c475ab 10944
99c475ab
FB
10945 switch (insn >> 12) {
10946 case 0: case 1:
396e467c 10947
99c475ab
FB
10948 rd = insn & 7;
10949 op = (insn >> 11) & 3;
10950 if (op == 3) {
a2d12f0f
PM
10951 /*
10952 * 0b0001_1xxx_xxxx_xxxx
10953 * - Add, subtract (three low registers)
10954 * - Add, subtract (two low registers and immediate)
10955 */
99c475ab 10956 rn = (insn >> 3) & 7;
396e467c 10957 tmp = load_reg(s, rn);
99c475ab
FB
10958 if (insn & (1 << 10)) {
10959 /* immediate */
7d1b0095 10960 tmp2 = tcg_temp_new_i32();
396e467c 10961 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10962 } else {
10963 /* reg */
10964 rm = (insn >> 6) & 7;
396e467c 10965 tmp2 = load_reg(s, rm);
99c475ab 10966 }
9ee6e8bb
PB
10967 if (insn & (1 << 9)) {
10968 if (s->condexec_mask)
396e467c 10969 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10970 else
72485ec4 10971 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10972 } else {
10973 if (s->condexec_mask)
396e467c 10974 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10975 else
72485ec4 10976 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10977 }
7d1b0095 10978 tcg_temp_free_i32(tmp2);
396e467c 10979 store_reg(s, rd, tmp);
99c475ab
FB
10980 } else {
10981 /* shift immediate */
10982 rm = (insn >> 3) & 7;
10983 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10984 tmp = load_reg(s, rm);
10985 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10986 if (!s->condexec_mask)
10987 gen_logic_CC(tmp);
10988 store_reg(s, rd, tmp);
99c475ab
FB
10989 }
10990 break;
10991 case 2: case 3:
a2d12f0f
PM
10992 /*
10993 * 0b001x_xxxx_xxxx_xxxx
10994 * - Add, subtract, compare, move (one low register and immediate)
10995 */
99c475ab
FB
10996 op = (insn >> 11) & 3;
10997 rd = (insn >> 8) & 0x7;
396e467c 10998 if (op == 0) { /* mov */
7d1b0095 10999 tmp = tcg_temp_new_i32();
396e467c 11000 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11001 if (!s->condexec_mask)
396e467c
FN
11002 gen_logic_CC(tmp);
11003 store_reg(s, rd, tmp);
11004 } else {
11005 tmp = load_reg(s, rd);
7d1b0095 11006 tmp2 = tcg_temp_new_i32();
396e467c
FN
11007 tcg_gen_movi_i32(tmp2, insn & 0xff);
11008 switch (op) {
11009 case 1: /* cmp */
72485ec4 11010 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11011 tcg_temp_free_i32(tmp);
11012 tcg_temp_free_i32(tmp2);
396e467c
FN
11013 break;
11014 case 2: /* add */
11015 if (s->condexec_mask)
11016 tcg_gen_add_i32(tmp, tmp, tmp2);
11017 else
72485ec4 11018 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11019 tcg_temp_free_i32(tmp2);
396e467c
FN
11020 store_reg(s, rd, tmp);
11021 break;
11022 case 3: /* sub */
11023 if (s->condexec_mask)
11024 tcg_gen_sub_i32(tmp, tmp, tmp2);
11025 else
72485ec4 11026 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11027 tcg_temp_free_i32(tmp2);
396e467c
FN
11028 store_reg(s, rd, tmp);
11029 break;
11030 }
99c475ab 11031 }
99c475ab
FB
11032 break;
11033 case 4:
11034 if (insn & (1 << 11)) {
11035 rd = (insn >> 8) & 7;
5899f386 11036 /* load pc-relative. Bit 1 of PC is ignored. */
16e0d823 11037 addr = add_reg_for_lit(s, 15, (insn & 0xff) * 4);
c40c8556 11038 tmp = tcg_temp_new_i32();
9bb6558a
PM
11039 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11040 rd | ISSIs16Bit);
7d1b0095 11041 tcg_temp_free_i32(addr);
b0109805 11042 store_reg(s, rd, tmp);
99c475ab
FB
11043 break;
11044 }
11045 if (insn & (1 << 10)) {
ebfe27c5
PM
11046 /* 0b0100_01xx_xxxx_xxxx
11047 * - data processing extended, branch and exchange
11048 */
99c475ab
FB
11049 rd = (insn & 7) | ((insn >> 4) & 8);
11050 rm = (insn >> 3) & 0xf;
11051 op = (insn >> 8) & 3;
11052 switch (op) {
11053 case 0: /* add */
396e467c
FN
11054 tmp = load_reg(s, rd);
11055 tmp2 = load_reg(s, rm);
11056 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11057 tcg_temp_free_i32(tmp2);
55203189
PM
11058 if (rd == 13) {
11059 /* ADD SP, SP, reg */
11060 store_sp_checked(s, tmp);
11061 } else {
11062 store_reg(s, rd, tmp);
11063 }
99c475ab
FB
11064 break;
11065 case 1: /* cmp */
396e467c
FN
11066 tmp = load_reg(s, rd);
11067 tmp2 = load_reg(s, rm);
72485ec4 11068 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11069 tcg_temp_free_i32(tmp2);
11070 tcg_temp_free_i32(tmp);
99c475ab
FB
11071 break;
11072 case 2: /* mov/cpy */
396e467c 11073 tmp = load_reg(s, rm);
55203189
PM
11074 if (rd == 13) {
11075 /* MOV SP, reg */
11076 store_sp_checked(s, tmp);
11077 } else {
11078 store_reg(s, rd, tmp);
11079 }
99c475ab 11080 break;
ebfe27c5
PM
11081 case 3:
11082 {
11083 /* 0b0100_0111_xxxx_xxxx
11084 * - branch [and link] exchange thumb register
11085 */
11086 bool link = insn & (1 << 7);
11087
fb602cb7 11088 if (insn & 3) {
ebfe27c5
PM
11089 goto undef;
11090 }
11091 if (link) {
be5e7a76 11092 ARCH(5);
ebfe27c5 11093 }
fb602cb7
PM
11094 if ((insn & 4)) {
11095 /* BXNS/BLXNS: only exists for v8M with the
11096 * security extensions, and always UNDEF if NonSecure.
11097 * We don't implement these in the user-only mode
11098 * either (in theory you can use them from Secure User
11099 * mode but they are too tied in to system emulation.)
11100 */
11101 if (!s->v8m_secure || IS_USER_ONLY) {
11102 goto undef;
11103 }
11104 if (link) {
3e3fa230 11105 gen_blxns(s, rm);
fb602cb7
PM
11106 } else {
11107 gen_bxns(s, rm);
11108 }
11109 break;
11110 }
11111 /* BLX/BX */
ebfe27c5
PM
11112 tmp = load_reg(s, rm);
11113 if (link) {
a0415916 11114 val = (uint32_t)s->base.pc_next | 1;
7d1b0095 11115 tmp2 = tcg_temp_new_i32();
b0109805
PB
11116 tcg_gen_movi_i32(tmp2, val);
11117 store_reg(s, 14, tmp2);
3bb8a96f
PM
11118 gen_bx(s, tmp);
11119 } else {
11120 /* Only BX works as exception-return, not BLX */
11121 gen_bx_excret(s, tmp);
99c475ab 11122 }
99c475ab
FB
11123 break;
11124 }
ebfe27c5 11125 }
99c475ab
FB
11126 break;
11127 }
11128
a2d12f0f
PM
11129 /*
11130 * 0b0100_00xx_xxxx_xxxx
11131 * - Data-processing (two low registers)
11132 */
99c475ab
FB
11133 rd = insn & 7;
11134 rm = (insn >> 3) & 7;
11135 op = (insn >> 6) & 0xf;
11136 if (op == 2 || op == 3 || op == 4 || op == 7) {
11137 /* the shift/rotate ops want the operands backwards */
11138 val = rm;
11139 rm = rd;
11140 rd = val;
11141 val = 1;
11142 } else {
11143 val = 0;
11144 }
11145
396e467c 11146 if (op == 9) { /* neg */
7d1b0095 11147 tmp = tcg_temp_new_i32();
396e467c
FN
11148 tcg_gen_movi_i32(tmp, 0);
11149 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11150 tmp = load_reg(s, rd);
11151 } else {
f764718d 11152 tmp = NULL;
396e467c 11153 }
99c475ab 11154
396e467c 11155 tmp2 = load_reg(s, rm);
5899f386 11156 switch (op) {
99c475ab 11157 case 0x0: /* and */
396e467c 11158 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11159 if (!s->condexec_mask)
396e467c 11160 gen_logic_CC(tmp);
99c475ab
FB
11161 break;
11162 case 0x1: /* eor */
396e467c 11163 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11164 if (!s->condexec_mask)
396e467c 11165 gen_logic_CC(tmp);
99c475ab
FB
11166 break;
11167 case 0x2: /* lsl */
9ee6e8bb 11168 if (s->condexec_mask) {
365af80e 11169 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11170 } else {
9ef39277 11171 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11172 gen_logic_CC(tmp2);
9ee6e8bb 11173 }
99c475ab
FB
11174 break;
11175 case 0x3: /* lsr */
9ee6e8bb 11176 if (s->condexec_mask) {
365af80e 11177 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11178 } else {
9ef39277 11179 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11180 gen_logic_CC(tmp2);
9ee6e8bb 11181 }
99c475ab
FB
11182 break;
11183 case 0x4: /* asr */
9ee6e8bb 11184 if (s->condexec_mask) {
365af80e 11185 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11186 } else {
9ef39277 11187 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11188 gen_logic_CC(tmp2);
9ee6e8bb 11189 }
99c475ab
FB
11190 break;
11191 case 0x5: /* adc */
49b4c31e 11192 if (s->condexec_mask) {
396e467c 11193 gen_adc(tmp, tmp2);
49b4c31e
RH
11194 } else {
11195 gen_adc_CC(tmp, tmp, tmp2);
11196 }
99c475ab
FB
11197 break;
11198 case 0x6: /* sbc */
2de68a49 11199 if (s->condexec_mask) {
396e467c 11200 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11201 } else {
11202 gen_sbc_CC(tmp, tmp, tmp2);
11203 }
99c475ab
FB
11204 break;
11205 case 0x7: /* ror */
9ee6e8bb 11206 if (s->condexec_mask) {
f669df27
AJ
11207 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11208 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11209 } else {
9ef39277 11210 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11211 gen_logic_CC(tmp2);
9ee6e8bb 11212 }
99c475ab
FB
11213 break;
11214 case 0x8: /* tst */
396e467c
FN
11215 tcg_gen_and_i32(tmp, tmp, tmp2);
11216 gen_logic_CC(tmp);
99c475ab 11217 rd = 16;
5899f386 11218 break;
99c475ab 11219 case 0x9: /* neg */
9ee6e8bb 11220 if (s->condexec_mask)
396e467c 11221 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11222 else
72485ec4 11223 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11224 break;
11225 case 0xa: /* cmp */
72485ec4 11226 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11227 rd = 16;
11228 break;
11229 case 0xb: /* cmn */
72485ec4 11230 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11231 rd = 16;
11232 break;
11233 case 0xc: /* orr */
396e467c 11234 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11235 if (!s->condexec_mask)
396e467c 11236 gen_logic_CC(tmp);
99c475ab
FB
11237 break;
11238 case 0xd: /* mul */
7b2919a0 11239 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11240 if (!s->condexec_mask)
396e467c 11241 gen_logic_CC(tmp);
99c475ab
FB
11242 break;
11243 case 0xe: /* bic */
f669df27 11244 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11245 if (!s->condexec_mask)
396e467c 11246 gen_logic_CC(tmp);
99c475ab
FB
11247 break;
11248 case 0xf: /* mvn */
396e467c 11249 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11250 if (!s->condexec_mask)
396e467c 11251 gen_logic_CC(tmp2);
99c475ab 11252 val = 1;
5899f386 11253 rm = rd;
99c475ab
FB
11254 break;
11255 }
11256 if (rd != 16) {
396e467c
FN
11257 if (val) {
11258 store_reg(s, rm, tmp2);
11259 if (op != 0xf)
7d1b0095 11260 tcg_temp_free_i32(tmp);
396e467c
FN
11261 } else {
11262 store_reg(s, rd, tmp);
7d1b0095 11263 tcg_temp_free_i32(tmp2);
396e467c
FN
11264 }
11265 } else {
7d1b0095
PM
11266 tcg_temp_free_i32(tmp);
11267 tcg_temp_free_i32(tmp2);
99c475ab
FB
11268 }
11269 break;
11270
11271 case 5:
11272 /* load/store register offset. */
11273 rd = insn & 7;
11274 rn = (insn >> 3) & 7;
11275 rm = (insn >> 6) & 7;
11276 op = (insn >> 9) & 7;
b0109805 11277 addr = load_reg(s, rn);
b26eefb6 11278 tmp = load_reg(s, rm);
b0109805 11279 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11280 tcg_temp_free_i32(tmp);
99c475ab 11281
c40c8556 11282 if (op < 3) { /* store */
b0109805 11283 tmp = load_reg(s, rd);
c40c8556
PM
11284 } else {
11285 tmp = tcg_temp_new_i32();
11286 }
99c475ab
FB
11287
11288 switch (op) {
11289 case 0: /* str */
9bb6558a 11290 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11291 break;
11292 case 1: /* strh */
9bb6558a 11293 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11294 break;
11295 case 2: /* strb */
9bb6558a 11296 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11297 break;
11298 case 3: /* ldrsb */
9bb6558a 11299 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11300 break;
11301 case 4: /* ldr */
9bb6558a 11302 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11303 break;
11304 case 5: /* ldrh */
9bb6558a 11305 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11306 break;
11307 case 6: /* ldrb */
9bb6558a 11308 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11309 break;
11310 case 7: /* ldrsh */
9bb6558a 11311 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11312 break;
11313 }
c40c8556 11314 if (op >= 3) { /* load */
b0109805 11315 store_reg(s, rd, tmp);
c40c8556
PM
11316 } else {
11317 tcg_temp_free_i32(tmp);
11318 }
7d1b0095 11319 tcg_temp_free_i32(addr);
99c475ab
FB
11320 break;
11321
11322 case 6:
11323 /* load/store word immediate offset */
11324 rd = insn & 7;
11325 rn = (insn >> 3) & 7;
b0109805 11326 addr = load_reg(s, rn);
99c475ab 11327 val = (insn >> 4) & 0x7c;
b0109805 11328 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11329
11330 if (insn & (1 << 11)) {
11331 /* load */
c40c8556 11332 tmp = tcg_temp_new_i32();
12dcc321 11333 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11334 store_reg(s, rd, tmp);
99c475ab
FB
11335 } else {
11336 /* store */
b0109805 11337 tmp = load_reg(s, rd);
12dcc321 11338 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11339 tcg_temp_free_i32(tmp);
99c475ab 11340 }
7d1b0095 11341 tcg_temp_free_i32(addr);
99c475ab
FB
11342 break;
11343
11344 case 7:
11345 /* load/store byte immediate offset */
11346 rd = insn & 7;
11347 rn = (insn >> 3) & 7;
b0109805 11348 addr = load_reg(s, rn);
99c475ab 11349 val = (insn >> 6) & 0x1f;
b0109805 11350 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11351
11352 if (insn & (1 << 11)) {
11353 /* load */
c40c8556 11354 tmp = tcg_temp_new_i32();
9bb6558a 11355 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11356 store_reg(s, rd, tmp);
99c475ab
FB
11357 } else {
11358 /* store */
b0109805 11359 tmp = load_reg(s, rd);
9bb6558a 11360 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11361 tcg_temp_free_i32(tmp);
99c475ab 11362 }
7d1b0095 11363 tcg_temp_free_i32(addr);
99c475ab
FB
11364 break;
11365
11366 case 8:
11367 /* load/store halfword immediate offset */
11368 rd = insn & 7;
11369 rn = (insn >> 3) & 7;
b0109805 11370 addr = load_reg(s, rn);
99c475ab 11371 val = (insn >> 5) & 0x3e;
b0109805 11372 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11373
11374 if (insn & (1 << 11)) {
11375 /* load */
c40c8556 11376 tmp = tcg_temp_new_i32();
9bb6558a 11377 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11378 store_reg(s, rd, tmp);
99c475ab
FB
11379 } else {
11380 /* store */
b0109805 11381 tmp = load_reg(s, rd);
9bb6558a 11382 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11383 tcg_temp_free_i32(tmp);
99c475ab 11384 }
7d1b0095 11385 tcg_temp_free_i32(addr);
99c475ab
FB
11386 break;
11387
11388 case 9:
11389 /* load/store from stack */
11390 rd = (insn >> 8) & 7;
b0109805 11391 addr = load_reg(s, 13);
99c475ab 11392 val = (insn & 0xff) * 4;
b0109805 11393 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11394
11395 if (insn & (1 << 11)) {
11396 /* load */
c40c8556 11397 tmp = tcg_temp_new_i32();
9bb6558a 11398 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11399 store_reg(s, rd, tmp);
99c475ab
FB
11400 } else {
11401 /* store */
b0109805 11402 tmp = load_reg(s, rd);
9bb6558a 11403 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11404 tcg_temp_free_i32(tmp);
99c475ab 11405 }
7d1b0095 11406 tcg_temp_free_i32(addr);
99c475ab
FB
11407 break;
11408
11409 case 10:
55203189
PM
11410 /*
11411 * 0b1010_xxxx_xxxx_xxxx
11412 * - Add PC/SP (immediate)
11413 */
99c475ab 11414 rd = (insn >> 8) & 7;
99c475ab 11415 val = (insn & 0xff) * 4;
16e0d823 11416 tmp = add_reg_for_lit(s, insn & (1 << 11) ? 13 : 15, val);
5e3f878a 11417 store_reg(s, rd, tmp);
99c475ab
FB
11418 break;
11419
11420 case 11:
11421 /* misc */
11422 op = (insn >> 8) & 0xf;
11423 switch (op) {
11424 case 0:
55203189
PM
11425 /*
11426 * 0b1011_0000_xxxx_xxxx
11427 * - ADD (SP plus immediate)
11428 * - SUB (SP minus immediate)
11429 */
b26eefb6 11430 tmp = load_reg(s, 13);
99c475ab
FB
11431 val = (insn & 0x7f) * 4;
11432 if (insn & (1 << 7))
6a0d8a1d 11433 val = -(int32_t)val;
b26eefb6 11434 tcg_gen_addi_i32(tmp, tmp, val);
55203189 11435 store_sp_checked(s, tmp);
99c475ab
FB
11436 break;
11437
9ee6e8bb
PB
11438 case 2: /* sign/zero extend. */
11439 ARCH(6);
11440 rd = insn & 7;
11441 rm = (insn >> 3) & 7;
b0109805 11442 tmp = load_reg(s, rm);
9ee6e8bb 11443 switch ((insn >> 6) & 3) {
b0109805
PB
11444 case 0: gen_sxth(tmp); break;
11445 case 1: gen_sxtb(tmp); break;
11446 case 2: gen_uxth(tmp); break;
11447 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11448 }
b0109805 11449 store_reg(s, rd, tmp);
9ee6e8bb 11450 break;
99c475ab 11451 case 4: case 5: case 0xc: case 0xd:
aa369e5c
PM
11452 /*
11453 * 0b1011_x10x_xxxx_xxxx
11454 * - push/pop
11455 */
b0109805 11456 addr = load_reg(s, 13);
5899f386
FB
11457 if (insn & (1 << 8))
11458 offset = 4;
99c475ab 11459 else
5899f386
FB
11460 offset = 0;
11461 for (i = 0; i < 8; i++) {
11462 if (insn & (1 << i))
11463 offset += 4;
11464 }
11465 if ((insn & (1 << 11)) == 0) {
b0109805 11466 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11467 }
aa369e5c
PM
11468
11469 if (s->v8m_stackcheck) {
11470 /*
11471 * Here 'addr' is the lower of "old SP" and "new SP";
11472 * if this is a pop that starts below the limit and ends
11473 * above it, it is UNKNOWN whether the limit check triggers;
11474 * we choose to trigger.
11475 */
11476 gen_helper_v8m_stackcheck(cpu_env, addr);
11477 }
11478
99c475ab
FB
11479 for (i = 0; i < 8; i++) {
11480 if (insn & (1 << i)) {
11481 if (insn & (1 << 11)) {
11482 /* pop */
c40c8556 11483 tmp = tcg_temp_new_i32();
12dcc321 11484 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11485 store_reg(s, i, tmp);
99c475ab
FB
11486 } else {
11487 /* push */
b0109805 11488 tmp = load_reg(s, i);
12dcc321 11489 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11490 tcg_temp_free_i32(tmp);
99c475ab 11491 }
5899f386 11492 /* advance to the next address. */
b0109805 11493 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11494 }
11495 }
f764718d 11496 tmp = NULL;
99c475ab
FB
11497 if (insn & (1 << 8)) {
11498 if (insn & (1 << 11)) {
11499 /* pop pc */
c40c8556 11500 tmp = tcg_temp_new_i32();
12dcc321 11501 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11502 /* don't set the pc until the rest of the instruction
11503 has completed */
11504 } else {
11505 /* push lr */
b0109805 11506 tmp = load_reg(s, 14);
12dcc321 11507 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11508 tcg_temp_free_i32(tmp);
99c475ab 11509 }
b0109805 11510 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11511 }
5899f386 11512 if ((insn & (1 << 11)) == 0) {
b0109805 11513 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11514 }
99c475ab 11515 /* write back the new stack pointer */
b0109805 11516 store_reg(s, 13, addr);
99c475ab 11517 /* set the new PC value */
be5e7a76 11518 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11519 store_reg_from_load(s, 15, tmp);
be5e7a76 11520 }
99c475ab
FB
11521 break;
11522
9ee6e8bb
PB
11523 case 1: case 3: case 9: case 11: /* czb */
11524 rm = insn & 7;
d9ba4830 11525 tmp = load_reg(s, rm);
c2d9644e 11526 arm_gen_condlabel(s);
9ee6e8bb 11527 if (insn & (1 << 11))
cb63669a 11528 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11529 else
cb63669a 11530 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11531 tcg_temp_free_i32(tmp);
9ee6e8bb 11532 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
fdbcf632 11533 gen_jmp(s, read_pc(s) + offset);
9ee6e8bb
PB
11534 break;
11535
11536 case 15: /* IT, nop-hint. */
11537 if ((insn & 0xf) == 0) {
11538 gen_nop_hint(s, (insn >> 4) & 0xf);
11539 break;
11540 }
5529de1e
PM
11541 /*
11542 * IT (If-Then)
11543 *
11544 * Combinations of firstcond and mask which set up an 0b1111
11545 * condition are UNPREDICTABLE; we take the CONSTRAINED
11546 * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
11547 * i.e. both meaning "execute always".
11548 */
9ee6e8bb
PB
11549 s->condexec_cond = (insn >> 4) & 0xe;
11550 s->condexec_mask = insn & 0x1f;
11551 /* No actual code generated for this insn, just setup state. */
11552 break;
11553
06c949e6 11554 case 0xe: /* bkpt */
d4a2dc67
PM
11555 {
11556 int imm8 = extract32(insn, 0, 8);
be5e7a76 11557 ARCH(5);
06bcbda3 11558 gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm8, true));
06c949e6 11559 break;
d4a2dc67 11560 }
06c949e6 11561
19a6e31c
PM
11562 case 0xa: /* rev, and hlt */
11563 {
11564 int op1 = extract32(insn, 6, 2);
11565
11566 if (op1 == 2) {
11567 /* HLT */
11568 int imm6 = extract32(insn, 0, 6);
11569
11570 gen_hlt(s, imm6);
11571 break;
11572 }
11573
11574 /* Otherwise this is rev */
9ee6e8bb
PB
11575 ARCH(6);
11576 rn = (insn >> 3) & 0x7;
11577 rd = insn & 0x7;
b0109805 11578 tmp = load_reg(s, rn);
19a6e31c 11579 switch (op1) {
66896cb8 11580 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11581 case 1: gen_rev16(tmp); break;
11582 case 3: gen_revsh(tmp); break;
19a6e31c
PM
11583 default:
11584 g_assert_not_reached();
9ee6e8bb 11585 }
b0109805 11586 store_reg(s, rd, tmp);
9ee6e8bb 11587 break;
19a6e31c 11588 }
9ee6e8bb 11589
d9e028c1
PM
11590 case 6:
11591 switch ((insn >> 5) & 7) {
11592 case 2:
11593 /* setend */
11594 ARCH(6);
9886ecdf
PB
11595 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11596 gen_helper_setend(cpu_env);
dcba3a8d 11597 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 11598 }
9ee6e8bb 11599 break;
d9e028c1
PM
11600 case 3:
11601 /* cps */
11602 ARCH(6);
11603 if (IS_USER(s)) {
11604 break;
8984bd2e 11605 }
b53d8923 11606 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11607 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11608 /* FAULTMASK */
11609 if (insn & 1) {
11610 addr = tcg_const_i32(19);
11611 gen_helper_v7m_msr(cpu_env, addr, tmp);
11612 tcg_temp_free_i32(addr);
11613 }
11614 /* PRIMASK */
11615 if (insn & 2) {
11616 addr = tcg_const_i32(16);
11617 gen_helper_v7m_msr(cpu_env, addr, tmp);
11618 tcg_temp_free_i32(addr);
11619 }
11620 tcg_temp_free_i32(tmp);
11621 gen_lookup_tb(s);
11622 } else {
11623 if (insn & (1 << 4)) {
11624 shift = CPSR_A | CPSR_I | CPSR_F;
11625 } else {
11626 shift = 0;
11627 }
11628 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11629 }
d9e028c1
PM
11630 break;
11631 default:
11632 goto undef;
9ee6e8bb
PB
11633 }
11634 break;
11635
99c475ab
FB
11636 default:
11637 goto undef;
11638 }
11639 break;
11640
11641 case 12:
a7d3970d 11642 {
99c475ab 11643 /* load/store multiple */
f764718d 11644 TCGv_i32 loaded_var = NULL;
99c475ab 11645 rn = (insn >> 8) & 0x7;
b0109805 11646 addr = load_reg(s, rn);
99c475ab
FB
11647 for (i = 0; i < 8; i++) {
11648 if (insn & (1 << i)) {
99c475ab
FB
11649 if (insn & (1 << 11)) {
11650 /* load */
c40c8556 11651 tmp = tcg_temp_new_i32();
12dcc321 11652 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11653 if (i == rn) {
11654 loaded_var = tmp;
11655 } else {
11656 store_reg(s, i, tmp);
11657 }
99c475ab
FB
11658 } else {
11659 /* store */
b0109805 11660 tmp = load_reg(s, i);
12dcc321 11661 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11662 tcg_temp_free_i32(tmp);
99c475ab 11663 }
5899f386 11664 /* advance to the next address */
b0109805 11665 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11666 }
11667 }
b0109805 11668 if ((insn & (1 << rn)) == 0) {
a7d3970d 11669 /* base reg not in list: base register writeback */
b0109805
PB
11670 store_reg(s, rn, addr);
11671 } else {
a7d3970d
PM
11672 /* base reg in list: if load, complete it now */
11673 if (insn & (1 << 11)) {
11674 store_reg(s, rn, loaded_var);
11675 }
7d1b0095 11676 tcg_temp_free_i32(addr);
b0109805 11677 }
99c475ab 11678 break;
a7d3970d 11679 }
99c475ab
FB
11680 case 13:
11681 /* conditional branch or swi */
11682 cond = (insn >> 8) & 0xf;
11683 if (cond == 0xe)
11684 goto undef;
11685
11686 if (cond == 0xf) {
11687 /* swi */
a0415916 11688 gen_set_pc_im(s, s->base.pc_next);
d4a2dc67 11689 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 11690 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
11691 break;
11692 }
11693 /* generate a conditional jump to next instruction */
c2d9644e 11694 arm_skip_unless(s, cond);
99c475ab
FB
11695
11696 /* jump to the offset */
fdbcf632 11697 val = read_pc(s);
99c475ab 11698 offset = ((int32_t)insn << 24) >> 24;
5899f386 11699 val += offset << 1;
8aaca4c0 11700 gen_jmp(s, val);
99c475ab
FB
11701 break;
11702
11703 case 14:
358bf29e 11704 if (insn & (1 << 11)) {
296e5a0a
PM
11705 /* thumb_insn_is_16bit() ensures we can't get here for
11706 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
11707 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
11708 */
11709 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11710 ARCH(5);
11711 offset = ((insn & 0x7ff) << 1);
11712 tmp = load_reg(s, 14);
11713 tcg_gen_addi_i32(tmp, tmp, offset);
11714 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
11715
11716 tmp2 = tcg_temp_new_i32();
a0415916 11717 tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
296e5a0a
PM
11718 store_reg(s, 14, tmp2);
11719 gen_bx(s, tmp);
358bf29e
PB
11720 break;
11721 }
9ee6e8bb 11722 /* unconditional branch */
fdbcf632 11723 val = read_pc(s);
99c475ab 11724 offset = ((int32_t)insn << 21) >> 21;
fdbcf632 11725 val += offset << 1;
8aaca4c0 11726 gen_jmp(s, val);
99c475ab
FB
11727 break;
11728
11729 case 15:
296e5a0a
PM
11730 /* thumb_insn_is_16bit() ensures we can't get here for
11731 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
11732 */
11733 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11734
11735 if (insn & (1 << 11)) {
11736 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
11737 offset = ((insn & 0x7ff) << 1) | 1;
11738 tmp = load_reg(s, 14);
11739 tcg_gen_addi_i32(tmp, tmp, offset);
11740
11741 tmp2 = tcg_temp_new_i32();
a0415916 11742 tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
296e5a0a
PM
11743 store_reg(s, 14, tmp2);
11744 gen_bx(s, tmp);
11745 } else {
11746 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
11747 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
11748
fdbcf632 11749 tcg_gen_movi_i32(cpu_R[14], read_pc(s) + uoffset);
296e5a0a 11750 }
9ee6e8bb 11751 break;
99c475ab
FB
11752 }
11753 return;
9ee6e8bb 11754illegal_op:
99c475ab 11755undef:
1ce21ba1 11756 unallocated_encoding(s);
99c475ab
FB
11757}
11758
541ebcd4
PM
11759static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11760{
a0415916 11761 /* Return true if the insn at dc->base.pc_next might cross a page boundary.
541ebcd4 11762 * (False positives are OK, false negatives are not.)
5b8d7289 11763 * We know this is a Thumb insn, and our caller ensures we are
a0415916 11764 * only called if dc->base.pc_next is less than 4 bytes from the page
5b8d7289
PM
11765 * boundary, so we cross the page if the first 16 bits indicate
11766 * that this is a 32 bit insn.
541ebcd4 11767 */
a0415916 11768 uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
541ebcd4 11769
a0415916 11770 return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
541ebcd4
PM
11771}
11772
b542683d 11773static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2c0262af 11774{
1d8a5535 11775 DisasContext *dc = container_of(dcbase, DisasContext, base);
9c489ea6 11776 CPUARMState *env = cs->env_ptr;
2fc0cc0e 11777 ARMCPU *cpu = env_archcpu(env);
aad821ac
RH
11778 uint32_t tb_flags = dc->base.tb->flags;
11779 uint32_t condexec, core_mmu_idx;
3b46e624 11780
962fcbf2 11781 dc->isar = &cpu->isar;
e50e6a20 11782 dc->condjmp = 0;
3926cc84 11783
40f860cd 11784 dc->aarch64 = 0;
cef9ee70
SS
11785 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11786 * there is no secure EL1, so we route exceptions to EL3.
11787 */
11788 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11789 !arm_el_is_aa64(env, 3);
aad821ac
RH
11790 dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
11791 dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
11792 dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
11793 condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
11794 dc->condexec_mask = (condexec & 0xf) << 1;
11795 dc->condexec_cond = condexec >> 4;
11796 core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
11797 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
c1e37810 11798 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 11799#if !defined(CONFIG_USER_ONLY)
c1e37810 11800 dc->user = (dc->current_el == 0);
3926cc84 11801#endif
aad821ac
RH
11802 dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
11803 dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
11804 dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
11805 dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
ea7ac69d
PM
11806 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
11807 dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
11808 dc->vec_stride = 0;
11809 } else {
11810 dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
11811 dc->c15_cpar = 0;
11812 }
aad821ac 11813 dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
fb602cb7
PM
11814 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
11815 regime_is_secure(env, dc->mmu_idx);
aad821ac 11816 dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
6d60c67a 11817 dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
6000531e
PM
11818 dc->v7m_new_fp_ctxt_needed =
11819 FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
e33cf0f8 11820 dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
60322b39 11821 dc->cp_regs = cpu->cp_regs;
a984e42c 11822 dc->features = env->features;
40f860cd 11823
50225ad0
PM
11824 /* Single step state. The code-generation logic here is:
11825 * SS_ACTIVE == 0:
11826 * generate code with no special handling for single-stepping (except
11827 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11828 * this happens anyway because those changes are all system register or
11829 * PSTATE writes).
11830 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11831 * emit code for one insn
11832 * emit code to clear PSTATE.SS
11833 * emit code to generate software step exception for completed step
11834 * end TB (as usual for having generated an exception)
11835 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11836 * emit code to generate a software step exception
11837 * end the TB
11838 */
aad821ac
RH
11839 dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
11840 dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
50225ad0 11841 dc->is_ldex = false;
8bd587c1
PM
11842 if (!arm_feature(env, ARM_FEATURE_M)) {
11843 dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
11844 }
50225ad0 11845
bfe7ad5b 11846 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
1d8a5535 11847
f7708456
RH
11848 /* If architectural single step active, limit to 1. */
11849 if (is_singlestepping(dc)) {
b542683d 11850 dc->base.max_insns = 1;
f7708456
RH
11851 }
11852
d0264d86
RH
11853 /* ARM is a fixed-length ISA. Bound the number of insns to execute
11854 to those left on the page. */
11855 if (!dc->thumb) {
bfe7ad5b 11856 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
b542683d 11857 dc->base.max_insns = MIN(dc->base.max_insns, bound);
d0264d86
RH
11858 }
11859
d9eea52c
PM
11860 cpu_V0 = tcg_temp_new_i64();
11861 cpu_V1 = tcg_temp_new_i64();
e677137d 11862 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 11863 cpu_M0 = tcg_temp_new_i64();
1d8a5535
LV
11864}
11865
b1476854
LV
11866static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
11867{
11868 DisasContext *dc = container_of(dcbase, DisasContext, base);
11869
11870 /* A note on handling of the condexec (IT) bits:
11871 *
11872 * We want to avoid the overhead of having to write the updated condexec
11873 * bits back to the CPUARMState for every instruction in an IT block. So:
11874 * (1) if the condexec bits are not already zero then we write
11875 * zero back into the CPUARMState now. This avoids complications trying
11876 * to do it at the end of the block. (For example if we don't do this
11877 * it's hard to identify whether we can safely skip writing condexec
11878 * at the end of the TB, which we definitely want to do for the case
11879 * where a TB doesn't do anything with the IT state at all.)
11880 * (2) if we are going to leave the TB then we call gen_set_condexec()
11881 * which will write the correct value into CPUARMState if zero is wrong.
11882 * This is done both for leaving the TB at the end, and for leaving
11883 * it because of an exception we know will happen, which is done in
11884 * gen_exception_insn(). The latter is necessary because we need to
11885 * leave the TB with the PC/IT state just prior to execution of the
11886 * instruction which caused the exception.
11887 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11888 * then the CPUARMState will be wrong and we need to reset it.
11889 * This is handled in the same way as restoration of the
11890 * PC in these situations; we save the value of the condexec bits
11891 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11892 * then uses this to restore them after an exception.
11893 *
11894 * Note that there are no instructions which can read the condexec
11895 * bits, and none which can write non-static values to them, so
11896 * we don't need to care about whether CPUARMState is correct in the
11897 * middle of a TB.
11898 */
11899
11900 /* Reset the conditional execution bits immediately. This avoids
11901 complications trying to do it at the end of the block. */
11902 if (dc->condexec_mask || dc->condexec_cond) {
11903 TCGv_i32 tmp = tcg_temp_new_i32();
11904 tcg_gen_movi_i32(tmp, 0);
11905 store_cpu_field(tmp, condexec_bits);
11906 }
11907}
11908
f62bd897
LV
11909static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
11910{
11911 DisasContext *dc = container_of(dcbase, DisasContext, base);
11912
a0415916 11913 tcg_gen_insn_start(dc->base.pc_next,
f62bd897
LV
11914 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
11915 0);
15fa08f8 11916 dc->insn_start = tcg_last_op();
f62bd897
LV
11917}
11918
a68956ad
LV
11919static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
11920 const CPUBreakpoint *bp)
11921{
11922 DisasContext *dc = container_of(dcbase, DisasContext, base);
11923
11924 if (bp->flags & BP_CPU) {
11925 gen_set_condexec(dc);
a0415916 11926 gen_set_pc_im(dc, dc->base.pc_next);
a68956ad
LV
11927 gen_helper_check_breakpoints(cpu_env);
11928 /* End the TB early; it's likely not going to be executed */
11929 dc->base.is_jmp = DISAS_TOO_MANY;
11930 } else {
aee828e7 11931 gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
a68956ad
LV
11932 /* The address covered by the breakpoint must be
11933 included in [tb->pc, tb->pc + tb->size) in order
11934 to for it to be properly cleared -- thus we
11935 increment the PC here so that the logic setting
11936 tb->size below does the right thing. */
11937 /* TODO: Advance PC by correct instruction length to
11938 * avoid disassembler error messages */
a0415916 11939 dc->base.pc_next += 2;
a68956ad
LV
11940 dc->base.is_jmp = DISAS_NORETURN;
11941 }
11942
11943 return true;
11944}
11945
722ef0a5 11946static bool arm_pre_translate_insn(DisasContext *dc)
13189a90 11947{
13189a90
LV
11948#ifdef CONFIG_USER_ONLY
11949 /* Intercept jump to the magic kernel page. */
a0415916 11950 if (dc->base.pc_next >= 0xffff0000) {
13189a90
LV
11951 /* We always get here via a jump, so know we are not in a
11952 conditional execution block. */
11953 gen_exception_internal(EXCP_KERNEL_TRAP);
11954 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 11955 return true;
13189a90
LV
11956 }
11957#endif
11958
11959 if (dc->ss_active && !dc->pstate_ss) {
11960 /* Singlestep state is Active-pending.
11961 * If we're in this state at the start of a TB then either
11962 * a) we just took an exception to an EL which is being debugged
11963 * and this is the first insn in the exception handler
11964 * b) debug exceptions were masked and we just unmasked them
11965 * without changing EL (eg by clearing PSTATE.D)
11966 * In either case we're going to take a swstep exception in the
11967 * "did not step an insn" case, and so the syndrome ISV and EX
11968 * bits should be zero.
11969 */
11970 assert(dc->base.num_insns == 1);
c1d5f50f 11971 gen_swstep_exception(dc, 0, 0);
13189a90 11972 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 11973 return true;
13189a90
LV
11974 }
11975
722ef0a5
RH
11976 return false;
11977}
13189a90 11978
d0264d86 11979static void arm_post_translate_insn(DisasContext *dc)
722ef0a5 11980{
13189a90
LV
11981 if (dc->condjmp && !dc->base.is_jmp) {
11982 gen_set_label(dc->condlabel);
11983 dc->condjmp = 0;
11984 }
23169224 11985 translator_loop_temp_check(&dc->base);
13189a90
LV
11986}
11987
722ef0a5
RH
11988static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
11989{
11990 DisasContext *dc = container_of(dcbase, DisasContext, base);
11991 CPUARMState *env = cpu->env_ptr;
11992 unsigned int insn;
11993
11994 if (arm_pre_translate_insn(dc)) {
11995 return;
11996 }
11997
a0415916
RH
11998 dc->pc_curr = dc->base.pc_next;
11999 insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
58803318 12000 dc->insn = insn;
a0415916 12001 dc->base.pc_next += 4;
722ef0a5
RH
12002 disas_arm_insn(dc, insn);
12003
d0264d86
RH
12004 arm_post_translate_insn(dc);
12005
12006 /* ARM is a fixed-length ISA. We performed the cross-page check
12007 in init_disas_context by adjusting max_insns. */
722ef0a5
RH
12008}
12009
dcf14dfb
PM
12010static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12011{
12012 /* Return true if this Thumb insn is always unconditional,
12013 * even inside an IT block. This is true of only a very few
12014 * instructions: BKPT, HLT, and SG.
12015 *
12016 * A larger class of instructions are UNPREDICTABLE if used
12017 * inside an IT block; we do not need to detect those here, because
12018 * what we do by default (perform the cc check and update the IT
12019 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12020 * choice for those situations.
12021 *
12022 * insn is either a 16-bit or a 32-bit instruction; the two are
12023 * distinguishable because for the 16-bit case the top 16 bits
12024 * are zeroes, and that isn't a valid 32-bit encoding.
12025 */
12026 if ((insn & 0xffffff00) == 0xbe00) {
12027 /* BKPT */
12028 return true;
12029 }
12030
12031 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12032 !arm_dc_feature(s, ARM_FEATURE_M)) {
12033 /* HLT: v8A only. This is unconditional even when it is going to
12034 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12035 * For v7 cores this was a plain old undefined encoding and so
12036 * honours its cc check. (We might be using the encoding as
12037 * a semihosting trap, but we don't change the cc check behaviour
12038 * on that account, because a debugger connected to a real v7A
12039 * core and emulating semihosting traps by catching the UNDEF
12040 * exception would also only see cases where the cc check passed.
12041 * No guest code should be trying to do a HLT semihosting trap
12042 * in an IT block anyway.
12043 */
12044 return true;
12045 }
12046
12047 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12048 arm_dc_feature(s, ARM_FEATURE_M)) {
12049 /* SG: v8M only */
12050 return true;
12051 }
12052
12053 return false;
12054}
12055
/*
 * Translate one Thumb instruction.  Order here matters: fetch (one or
 * two halfwords), emit the IT-block condition skip, decode, advance
 * the IT state machine, then decide whether the TB must stop before
 * the next page.
 */
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    /* Fetch the first halfword and widen to 32 bits if it is the
     * start of a 32-bit Thumb2 encoding. */
    dc->pc_curr = dc->base.pc_next;
    insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
    dc->base.pc_next += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->base.pc_next += 2;
    }
    dc->insn = insn;

    /* Inside an IT block, emit the condition check before decoding,
     * unless the insn (BKPT/HLT/SG) is always unconditional. */
    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        /*
         * Conditionally skip the insn. Note that both 0xe and 0xf mean
         * "always"; 0xf is not "never".
         */
        if (cond < 0x0e) {
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        /* Shift the next "execute/skip" bit into the low condition bit;
         * when the mask runs out the IT block is finished. */
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
/*
 * End-of-TB hook: emit the code that finishes the translation block
 * according to dc->base.is_jmp, handling the single-stepping and the
 * "condition failed" (dc->condjmp) codepaths.
 */
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        /* NOTE: DISAS_NORETURN is deliberately placed after "default"
         * below, so that the DISAS_NEXT/TOO_MANY/UPDATE fallthrough
         * reaches the default (singlestep exception) case while
         * NORETURN emits nothing. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        /* NOTE: as above, DISAS_NORETURN sits after "default" so that
         * the DISAS_UPDATE fallthrough reaches the exit_tb path. */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            /* WFI takes the byte size of the insn (2 for a 16-bit Thumb
             * encoding, else 4) so the helper can advance the PC. */
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->base.pc_next);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->base.pc_next);
        }
    }
}
4013f7fc
LV
12250static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12251{
12252 DisasContext *dc = container_of(dcbase, DisasContext, base);
12253
12254 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
1d48474d 12255 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
4013f7fc
LV
12256}
12257
/* Translator hook table for A32 (ARM) instruction translation. */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
/* Translator hook table for T32 (Thumb): shares every hook with the
 * A32 table except translate_insn. */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
70d3c035 12278/* generate intermediate code for basic block 'tb'. */
8b86d6d2 12279void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
70d3c035 12280{
23169224
LV
12281 DisasContext dc;
12282 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 12283
aad821ac 12284 if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
722ef0a5
RH
12285 ops = &thumb_translator_ops;
12286 }
23169224 12287#ifdef TARGET_AARCH64
aad821ac 12288 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
23169224 12289 ops = &aarch64_translator_ops;
2c0262af
FB
12290 }
12291#endif
23169224 12292
8b86d6d2 12293 translator_loop(ops, &dc.base, cpu, tb, max_insns);
2c0262af
FB
12294}
12295
bad729e2
RH
12296void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12297 target_ulong *data)
d2856f1a 12298{
3926cc84 12299 if (is_a64(env)) {
bad729e2 12300 env->pc = data[0];
40f860cd 12301 env->condexec_bits = 0;
aaa1f954 12302 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12303 } else {
bad729e2
RH
12304 env->regs[15] = data[0];
12305 env->condexec_bits = data[1];
aaa1f954 12306 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12307 }
d2856f1a 12308}