/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"
#include "hw/semihosting/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
/* Function prototypes for gen_ functions for fix point conversions */
typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
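
/* Illustrative example: a 32-bit load into r3 reaches this function with
 * memop = MO_UL and issinfo = 3, giving sas = 2 (a 4-byte access),
 * sse = false and srt = 3, so the syndrome recorded for a faulting
 * access describes "32-bit read into r3".
 */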

static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* The architectural value of PC.  */
static uint32_t read_pc(DisasContext *s)
{
    return s->pc_curr + (s->thumb ? 4 : 8);
}
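
/* Illustrative example: for an A32 insn at 0x1000, read_pc() returns
 * 0x1008; for a T32 insn at the same address it returns 0x1004. This is
 * the architectural "PC reads as current insn address plus 8 (ARM) or
 * plus 4 (Thumb)" rule, a leftover of the original ARM pipeline.
 */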

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        tcg_gen_movi_i32(var, read_pc(s));
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/*
 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
 * This is used for load/store for which use of PC implies (literal),
 * or ADD that implies ADR.
 */
static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    if (reg == 15) {
        tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
    } else {
        tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
    }
    return tmp;
}
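
/* Illustrative example: a Thumb "LDR r0, [pc, #8]" at 0x1002 sees
 * read_pc() = 0x1006, which is aligned down to 0x1004 before the offset
 * is applied, so the literal is loaded from 0x100c; the alignment step
 * makes PC-relative literals behave identically at either halfword
 * offset.
 */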

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}
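
/* Illustrative example: gen_rev16 turns 0xaabbccdd into 0xbbaaddcc;
 * each 16-bit lane is byteswapped independently, which is the REV16
 * semantics.
 */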

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
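
/* Illustrative example: gen_revsh turns 0x000080ff into 0xffffff80; the
 * low halfword is byteswapped to 0xff80 and then sign extended,
 * matching REVSH.
 */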

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
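
/* Illustrative example: t0 = 0x0001ffff, t1 = 0x00010001 yields
 * 0x00020000; the low halfwords wrap (0xffff + 0x0001 = 0x0000) without
 * carrying into the high lane, because clearing bit 15 of both operands
 * prevents any carry across the lane boundary and the final XOR
 * reinstates the correct bit 15.
 */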

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
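
/* This is the AArch32 SBC identity: T0 - T1 - NOT(CF) is computed as
 * T0 - T1 + CF - 1, so with CF = 1 (no borrow pending) it degenerates
 * to a plain subtraction.
 */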

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
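
/* Illustrative example: t0 = 0xffffffff, t1 = 0, CF = 1 makes the
 * 33-bit sum 0x100000000, so NF and ZF end up 0 (the Z flag reads as
 * set) and CF as 1; both the add2 path and the 64-bit fallback recover
 * the carry from bit 32 of the widened result.
 */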

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

#define GEN_SHIFT(name) \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
{ \
    TCGv_i32 tmp1, tmp2, tmp3; \
    tmp1 = tcg_temp_new_i32(); \
    tcg_gen_andi_i32(tmp1, t1, 0xff); \
    tmp2 = tcg_const_i32(0); \
    tmp3 = tcg_const_i32(0x1f); \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
    tcg_temp_free_i32(tmp3); \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
    tcg_gen_##name##_i32(dest, tmp2, tmp1); \
    tcg_temp_free_i32(tmp2); \
    tcg_temp_free_i32(tmp1); \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
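
/* Note the shift count is taken modulo 256 and then clamped to 31, so
 * eg an ASR by 40 fills the result with the sign bit, which is the
 * architectural outcome for ASR by 32 or more.
 */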

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};
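
/* In the ROR case above, shift == 0 encodes RRX: a rotate right by one
 * through carry, producing (CF << 31) | (var >> 1), with the old bit 0
 * becoming the new carry when flags are requested.
 */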

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}

static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
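
/* Illustrative example: an interworking branch to 0x8001 switches to
 * Thumb state and continues at 0x8000, while a branch to 0x8000 selects
 * ARM state; bit 0 of the target encodes the instruction set and is
 * never used as an address bit.
 */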

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
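
/* Illustrative example of the BE32 fixup: with SCTLR.B set, a byte
 * access to 0x1001 has its address XORed with 3 and really touches
 * 0x1002, and a halfword access to 0x1000 is redirected to 0x1002;
 * word-sized and larger accesses are left unchanged.
 */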

static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
                                     TCGv_i32 a32, int index) \
{ \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
} \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
                                           TCGv_i32 val, \
                                           TCGv_i32 a32, int index, \
                                           ISSInfo issinfo) \
{ \
    gen_aa32_ld##SUFF(s, val, a32, index); \
    disas_set_da_iss(s, OPC, issinfo); \
}

#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
                                     TCGv_i32 a32, int index) \
{ \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
} \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
                                           TCGv_i32 val, \
                                           TCGv_i32 a32, int index, \
                                           ISSInfo issinfo) \
{ \
    gen_aa32_st##SUFF(s, val, a32, index); \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
}

static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc_curr);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc_curr);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}
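
/* Illustrative example: D1 maps to zregs[0].d[1], and S3 maps to the
 * upper half of that same doubleword, reflecting the architectural
 * overlap of S[2n], S[2n+1] with D[n] and of D[2n], D[2n+1] with Q[n].
 */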

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}
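
/* Illustrative example: on a big-endian host, byte element 0 of a
 * register lives at host offset 7 within its 8-byte unit (0 XOR
 * (8 - 1)), so the XOR recreates the little-endian element order that
 * the rest of the Neon code assumes.
 */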
1419
39d5492a 1420static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1421{
39d5492a 1422 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1423 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1424 return tmp;
1425}
1426
2d6ac920
RH
1427static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
1428{
1429 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1430
1431 switch (mop) {
1432 case MO_UB:
1433 tcg_gen_ld8u_i32(var, cpu_env, offset);
1434 break;
1435 case MO_UW:
1436 tcg_gen_ld16u_i32(var, cpu_env, offset);
1437 break;
1438 case MO_UL:
1439 tcg_gen_ld_i32(var, cpu_env, offset);
1440 break;
1441 default:
1442 g_assert_not_reached();
1443 }
1444}
1445
ac55d007
RH
1446static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
1447{
1448 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1449
1450 switch (mop) {
1451 case MO_UB:
1452 tcg_gen_ld8u_i64(var, cpu_env, offset);
1453 break;
1454 case MO_UW:
1455 tcg_gen_ld16u_i64(var, cpu_env, offset);
1456 break;
1457 case MO_UL:
1458 tcg_gen_ld32u_i64(var, cpu_env, offset);
1459 break;
1460 case MO_Q:
1461 tcg_gen_ld_i64(var, cpu_env, offset);
1462 break;
1463 default:
1464 g_assert_not_reached();
1465 }
1466}
1467
39d5492a 1468static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1469{
1470 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1471 tcg_temp_free_i32(var);
8f8e3aa4
PB
1472}
1473
2d6ac920
RH
1474static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
1475{
1476 long offset = neon_element_offset(reg, ele, size);
1477
1478 switch (size) {
1479 case MO_8:
1480 tcg_gen_st8_i32(var, cpu_env, offset);
1481 break;
1482 case MO_16:
1483 tcg_gen_st16_i32(var, cpu_env, offset);
1484 break;
1485 case MO_32:
1486 tcg_gen_st_i32(var, cpu_env, offset);
1487 break;
1488 default:
1489 g_assert_not_reached();
1490 }
1491}
1492
ac55d007
RH
1493static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
1494{
1495 long offset = neon_element_offset(reg, ele, size);
1496
1497 switch (size) {
1498 case MO_8:
1499 tcg_gen_st8_i64(var, cpu_env, offset);
1500 break;
1501 case MO_16:
1502 tcg_gen_st16_i64(var, cpu_env, offset);
1503 break;
1504 case MO_32:
1505 tcg_gen_st32_i64(var, cpu_env, offset);
1506 break;
1507 case MO_64:
1508 tcg_gen_st_i64(var, cpu_env, offset);
1509 break;
1510 default:
1511 g_assert_not_reached();
1512 }
1513}
1514
a7812ae4 1515static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1516{
1517 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1518}
1519
a7812ae4 1520static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1521{
1522 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1523}
1524
160f3b64
PM
1525static inline void neon_load_reg32(TCGv_i32 var, int reg)
1526{
1527 tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
1528}
1529
1530static inline void neon_store_reg32(TCGv_i32 var, int reg)
1531{
1532 tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
1533}
1534
1a66ac61
RH
1535static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1536{
1537 TCGv_ptr ret = tcg_temp_new_ptr();
1538 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1539 return ret;
1540}
1541
d00584b7 1542#define ARM_CP_RW_BIT (1 << 20)
18c9b560 1543
78e138bc
PM
1544/* Include the VFP decoder */
1545#include "translate-vfp.inc.c"
1546
a7812ae4 1547static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1548{
0ecb72a5 1549 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1550}
1551
a7812ae4 1552static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1553{
0ecb72a5 1554 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1555}
1556
39d5492a 1557static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1558{
39d5492a 1559 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1560 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1561 return var;
e677137d
PB
1562}
1563
39d5492a 1564static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1565{
0ecb72a5 1566 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1567 tcg_temp_free_i32(var);
e677137d
PB
1568}
1569
1570static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1571{
1572 iwmmxt_store_reg(cpu_M0, rn);
1573}
1574
1575static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1576{
1577 iwmmxt_load_reg(cpu_M0, rn);
1578}
1579
1580static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1581{
1582 iwmmxt_load_reg(cpu_V1, rn);
1583 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1584}
1585
1586static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1587{
1588 iwmmxt_load_reg(cpu_V1, rn);
1589 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1590}
1591
1592static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1593{
1594 iwmmxt_load_reg(cpu_V1, rn);
1595 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1596}
1597
1598#define IWMMXT_OP(name) \
1599static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1600{ \
1601 iwmmxt_load_reg(cpu_V1, rn); \
1602 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1603}
1604
477955bd
PM
1605#define IWMMXT_OP_ENV(name) \
1606static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1607{ \
1608 iwmmxt_load_reg(cpu_V1, rn); \
1609 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1610}
1611
1612#define IWMMXT_OP_ENV_SIZE(name) \
1613IWMMXT_OP_ENV(name##b) \
1614IWMMXT_OP_ENV(name##w) \
1615IWMMXT_OP_ENV(name##l)
e677137d 1616
477955bd 1617#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1618static inline void gen_op_iwmmxt_##name##_M0(void) \
1619{ \
477955bd 1620 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1621}
1622
1623IWMMXT_OP(maddsq)
1624IWMMXT_OP(madduq)
1625IWMMXT_OP(sadb)
1626IWMMXT_OP(sadw)
1627IWMMXT_OP(mulslw)
1628IWMMXT_OP(mulshw)
1629IWMMXT_OP(mululw)
1630IWMMXT_OP(muluhw)
1631IWMMXT_OP(macsw)
1632IWMMXT_OP(macuw)
1633
477955bd
PM
1634IWMMXT_OP_ENV_SIZE(unpackl)
1635IWMMXT_OP_ENV_SIZE(unpackh)
1636
1637IWMMXT_OP_ENV1(unpacklub)
1638IWMMXT_OP_ENV1(unpackluw)
1639IWMMXT_OP_ENV1(unpacklul)
1640IWMMXT_OP_ENV1(unpackhub)
1641IWMMXT_OP_ENV1(unpackhuw)
1642IWMMXT_OP_ENV1(unpackhul)
1643IWMMXT_OP_ENV1(unpacklsb)
1644IWMMXT_OP_ENV1(unpacklsw)
1645IWMMXT_OP_ENV1(unpacklsl)
1646IWMMXT_OP_ENV1(unpackhsb)
1647IWMMXT_OP_ENV1(unpackhsw)
1648IWMMXT_OP_ENV1(unpackhsl)
1649
1650IWMMXT_OP_ENV_SIZE(cmpeq)
1651IWMMXT_OP_ENV_SIZE(cmpgtu)
1652IWMMXT_OP_ENV_SIZE(cmpgts)
1653
1654IWMMXT_OP_ENV_SIZE(mins)
1655IWMMXT_OP_ENV_SIZE(minu)
1656IWMMXT_OP_ENV_SIZE(maxs)
1657IWMMXT_OP_ENV_SIZE(maxu)
1658
1659IWMMXT_OP_ENV_SIZE(subn)
1660IWMMXT_OP_ENV_SIZE(addn)
1661IWMMXT_OP_ENV_SIZE(subu)
1662IWMMXT_OP_ENV_SIZE(addu)
1663IWMMXT_OP_ENV_SIZE(subs)
1664IWMMXT_OP_ENV_SIZE(adds)
1665
1666IWMMXT_OP_ENV(avgb0)
1667IWMMXT_OP_ENV(avgb1)
1668IWMMXT_OP_ENV(avgw0)
1669IWMMXT_OP_ENV(avgw1)
e677137d 1670
477955bd
PM
1671IWMMXT_OP_ENV(packuw)
1672IWMMXT_OP_ENV(packul)
1673IWMMXT_OP_ENV(packuq)
1674IWMMXT_OP_ENV(packsw)
1675IWMMXT_OP_ENV(packsl)
1676IWMMXT_OP_ENV(packsq)
e677137d 1677
e677137d
PB
1678static void gen_op_iwmmxt_set_mup(void)
1679{
39d5492a 1680 TCGv_i32 tmp;
e677137d
PB
1681 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1682 tcg_gen_ori_i32(tmp, tmp, 2);
1683 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1684}
1685
1686static void gen_op_iwmmxt_set_cup(void)
1687{
39d5492a 1688 TCGv_i32 tmp;
e677137d
PB
1689 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1690 tcg_gen_ori_i32(tmp, tmp, 1);
1691 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1692}
1693
1694static void gen_op_iwmmxt_setpsr_nz(void)
1695{
39d5492a 1696 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1697 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1698 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1699}
1700
1701static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1702{
1703 iwmmxt_load_reg(cpu_V1, rn);
86831435 1704 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1705 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1706}
1707
39d5492a
PM
1708static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1709 TCGv_i32 dest)
18c9b560
AZ
1710{
1711 int rd;
1712 uint32_t offset;
39d5492a 1713 TCGv_i32 tmp;
18c9b560
AZ
1714
1715 rd = (insn >> 16) & 0xf;
da6b5335 1716 tmp = load_reg(s, rd);
18c9b560
AZ
1717
1718 offset = (insn & 0xff) << ((insn >> 7) & 2);
1719 if (insn & (1 << 24)) {
1720 /* Pre indexed */
1721 if (insn & (1 << 23))
da6b5335 1722 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1723 else
da6b5335
FN
1724 tcg_gen_addi_i32(tmp, tmp, -offset);
1725 tcg_gen_mov_i32(dest, tmp);
18c9b560 1726 if (insn & (1 << 21))
da6b5335
FN
1727 store_reg(s, rd, tmp);
1728 else
7d1b0095 1729 tcg_temp_free_i32(tmp);
18c9b560
AZ
1730 } else if (insn & (1 << 21)) {
1731 /* Post indexed */
da6b5335 1732 tcg_gen_mov_i32(dest, tmp);
18c9b560 1733 if (insn & (1 << 23))
da6b5335 1734 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1735 else
da6b5335
FN
1736 tcg_gen_addi_i32(tmp, tmp, -offset);
1737 store_reg(s, rd, tmp);
18c9b560
AZ
1738 } else if (!(insn & (1 << 23)))
1739 return 1;
1740 return 0;
1741}
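/*
 * Illustrative decode for gen_iwmmxt_address(): the base register is
 * insn[19:16] and the immediate is insn[7:0], scaled by 4 when insn[8]
 * is set (the word/doubleword forms). Bit 24 selects pre-indexing,
 * bit 23 add vs. subtract, and bit 21 base writeback, so P=1 U=1 W=0
 * with base r2 and an immediate of 0x10 on a word access yields
 * dest = r2 + 0x40 with r2 left unchanged.
 */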
1742
39d5492a 1743static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1744{
1745 int rd = (insn >> 0) & 0xf;
39d5492a 1746 TCGv_i32 tmp;
18c9b560 1747
da6b5335
FN
1748 if (insn & (1 << 8)) {
1749 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1750 return 1;
da6b5335
FN
1751 } else {
1752 tmp = iwmmxt_load_creg(rd);
1753 }
1754 } else {
7d1b0095 1755 tmp = tcg_temp_new_i32();
da6b5335 1756 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1757 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1758 }
1759 tcg_gen_andi_i32(tmp, tmp, mask);
1760 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1761 tcg_temp_free_i32(tmp);
18c9b560
AZ
1762 return 0;
1763}
1764
a1c7273b 1765/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1766 (i.e. an undefined instruction). */
7dcc1f89 1767static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1768{
1769 int rd, wrd;
1770 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1771 TCGv_i32 addr;
1772 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1773
1774 if ((insn & 0x0e000e00) == 0x0c000000) {
1775 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1776 wrd = insn & 0xf;
1777 rdlo = (insn >> 12) & 0xf;
1778 rdhi = (insn >> 16) & 0xf;
d00584b7 1779 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1780 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1781 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1782 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1783 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
d00584b7 1784 } else { /* TMCRR */
da6b5335
FN
1785 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1786 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1787 gen_op_iwmmxt_set_mup();
1788 }
1789 return 0;
1790 }
1791
1792 wrd = (insn >> 12) & 0xf;
7d1b0095 1793 addr = tcg_temp_new_i32();
da6b5335 1794 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1795 tcg_temp_free_i32(addr);
18c9b560 1796 return 1;
da6b5335 1797 }
18c9b560 1798 if (insn & ARM_CP_RW_BIT) {
d00584b7 1799 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1800 tmp = tcg_temp_new_i32();
12dcc321 1801 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1802 iwmmxt_store_creg(wrd, tmp);
18c9b560 1803 } else {
e677137d
PB
1804 i = 1;
1805 if (insn & (1 << 8)) {
d00584b7 1806 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1807 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1808 i = 0;
d00584b7 1809 } else { /* WLDRW wRd */
29531141 1810 tmp = tcg_temp_new_i32();
12dcc321 1811 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1812 }
1813 } else {
29531141 1814 tmp = tcg_temp_new_i32();
d00584b7 1815 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1816 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
d00584b7 1817 } else { /* WLDRB */
12dcc321 1818 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1819 }
1820 }
1821 if (i) {
1822 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1823 tcg_temp_free_i32(tmp);
e677137d 1824 }
18c9b560
AZ
1825 gen_op_iwmmxt_movq_wRn_M0(wrd);
1826 }
1827 } else {
d00584b7 1828 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1829 tmp = iwmmxt_load_creg(wrd);
12dcc321 1830 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1831 } else {
1832 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1833 tmp = tcg_temp_new_i32();
e677137d 1834 if (insn & (1 << 8)) {
d00584b7 1835 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1836 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
d00584b7 1837 } else { /* WSTRW wRd */
ecc7b3aa 1838 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1839 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1840 }
1841 } else {
d00584b7 1842 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1843 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1844 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
d00584b7 1845 } else { /* WSTRB */
ecc7b3aa 1846 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1847 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1848 }
1849 }
18c9b560 1850 }
29531141 1851 tcg_temp_free_i32(tmp);
18c9b560 1852 }
7d1b0095 1853 tcg_temp_free_i32(addr);
18c9b560
AZ
1854 return 0;
1855 }
1856
1857 if ((insn & 0x0f000000) != 0x0e000000)
1858 return 1;
1859
1860 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
d00584b7 1861 case 0x000: /* WOR */
18c9b560
AZ
1862 wrd = (insn >> 12) & 0xf;
1863 rd0 = (insn >> 0) & 0xf;
1864 rd1 = (insn >> 16) & 0xf;
1865 gen_op_iwmmxt_movq_M0_wRn(rd0);
1866 gen_op_iwmmxt_orq_M0_wRn(rd1);
1867 gen_op_iwmmxt_setpsr_nz();
1868 gen_op_iwmmxt_movq_wRn_M0(wrd);
1869 gen_op_iwmmxt_set_mup();
1870 gen_op_iwmmxt_set_cup();
1871 break;
d00584b7 1872 case 0x011: /* TMCR */
18c9b560
AZ
1873 if (insn & 0xf)
1874 return 1;
1875 rd = (insn >> 12) & 0xf;
1876 wrd = (insn >> 16) & 0xf;
1877 switch (wrd) {
1878 case ARM_IWMMXT_wCID:
1879 case ARM_IWMMXT_wCASF:
1880 break;
1881 case ARM_IWMMXT_wCon:
1882 gen_op_iwmmxt_set_cup();
1883 /* Fall through. */
1884 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1885 tmp = iwmmxt_load_creg(wrd);
1886 tmp2 = load_reg(s, rd);
f669df27 1887 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1888 tcg_temp_free_i32(tmp2);
da6b5335 1889 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1890 break;
1891 case ARM_IWMMXT_wCGR0:
1892 case ARM_IWMMXT_wCGR1:
1893 case ARM_IWMMXT_wCGR2:
1894 case ARM_IWMMXT_wCGR3:
1895 gen_op_iwmmxt_set_cup();
da6b5335
FN
1896 tmp = load_reg(s, rd);
1897 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1898 break;
1899 default:
1900 return 1;
1901 }
1902 break;
d00584b7 1903 case 0x100: /* WXOR */
18c9b560
AZ
1904 wrd = (insn >> 12) & 0xf;
1905 rd0 = (insn >> 0) & 0xf;
1906 rd1 = (insn >> 16) & 0xf;
1907 gen_op_iwmmxt_movq_M0_wRn(rd0);
1908 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1909 gen_op_iwmmxt_setpsr_nz();
1910 gen_op_iwmmxt_movq_wRn_M0(wrd);
1911 gen_op_iwmmxt_set_mup();
1912 gen_op_iwmmxt_set_cup();
1913 break;
d00584b7 1914 case 0x111: /* TMRC */
18c9b560
AZ
1915 if (insn & 0xf)
1916 return 1;
1917 rd = (insn >> 12) & 0xf;
1918 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1919 tmp = iwmmxt_load_creg(wrd);
1920 store_reg(s, rd, tmp);
18c9b560 1921 break;
d00584b7 1922 case 0x300: /* WANDN */
18c9b560
AZ
1923 wrd = (insn >> 12) & 0xf;
1924 rd0 = (insn >> 0) & 0xf;
1925 rd1 = (insn >> 16) & 0xf;
1926 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1927 tcg_gen_not_i64(cpu_M0, cpu_M0); /* WANDN is AND-NOT: complement, not negate */
18c9b560
AZ
1928 gen_op_iwmmxt_andq_M0_wRn(rd1);
1929 gen_op_iwmmxt_setpsr_nz();
1930 gen_op_iwmmxt_movq_wRn_M0(wrd);
1931 gen_op_iwmmxt_set_mup();
1932 gen_op_iwmmxt_set_cup();
1933 break;
d00584b7 1934 case 0x200: /* WAND */
18c9b560
AZ
1935 wrd = (insn >> 12) & 0xf;
1936 rd0 = (insn >> 0) & 0xf;
1937 rd1 = (insn >> 16) & 0xf;
1938 gen_op_iwmmxt_movq_M0_wRn(rd0);
1939 gen_op_iwmmxt_andq_M0_wRn(rd1);
1940 gen_op_iwmmxt_setpsr_nz();
1941 gen_op_iwmmxt_movq_wRn_M0(wrd);
1942 gen_op_iwmmxt_set_mup();
1943 gen_op_iwmmxt_set_cup();
1944 break;
d00584b7 1945 case 0x810: case 0xa10: /* WMADD */
18c9b560
AZ
1946 wrd = (insn >> 12) & 0xf;
1947 rd0 = (insn >> 0) & 0xf;
1948 rd1 = (insn >> 16) & 0xf;
1949 gen_op_iwmmxt_movq_M0_wRn(rd0);
1950 if (insn & (1 << 21))
1951 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1952 else
1953 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1954 gen_op_iwmmxt_movq_wRn_M0(wrd);
1955 gen_op_iwmmxt_set_mup();
1956 break;
d00584b7 1957 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
18c9b560
AZ
1958 wrd = (insn >> 12) & 0xf;
1959 rd0 = (insn >> 16) & 0xf;
1960 rd1 = (insn >> 0) & 0xf;
1961 gen_op_iwmmxt_movq_M0_wRn(rd0);
1962 switch ((insn >> 22) & 3) {
1963 case 0:
1964 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1965 break;
1966 case 1:
1967 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1968 break;
1969 case 2:
1970 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1971 break;
1972 case 3:
1973 return 1;
1974 }
1975 gen_op_iwmmxt_movq_wRn_M0(wrd);
1976 gen_op_iwmmxt_set_mup();
1977 gen_op_iwmmxt_set_cup();
1978 break;
d00584b7 1979 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
18c9b560
AZ
1980 wrd = (insn >> 12) & 0xf;
1981 rd0 = (insn >> 16) & 0xf;
1982 rd1 = (insn >> 0) & 0xf;
1983 gen_op_iwmmxt_movq_M0_wRn(rd0);
1984 switch ((insn >> 22) & 3) {
1985 case 0:
1986 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1987 break;
1988 case 1:
1989 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1990 break;
1991 case 2:
1992 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1993 break;
1994 case 3:
1995 return 1;
1996 }
1997 gen_op_iwmmxt_movq_wRn_M0(wrd);
1998 gen_op_iwmmxt_set_mup();
1999 gen_op_iwmmxt_set_cup();
2000 break;
d00584b7 2001 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
18c9b560
AZ
2002 wrd = (insn >> 12) & 0xf;
2003 rd0 = (insn >> 16) & 0xf;
2004 rd1 = (insn >> 0) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
2006 if (insn & (1 << 22))
2007 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2008 else
2009 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2010 if (!(insn & (1 << 20)))
2011 gen_op_iwmmxt_addl_M0_wRn(wrd);
2012 gen_op_iwmmxt_movq_wRn_M0(wrd);
2013 gen_op_iwmmxt_set_mup();
2014 break;
d00584b7 2015 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
18c9b560
AZ
2016 wrd = (insn >> 12) & 0xf;
2017 rd0 = (insn >> 16) & 0xf;
2018 rd1 = (insn >> 0) & 0xf;
2019 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2020 if (insn & (1 << 21)) {
2021 if (insn & (1 << 20))
2022 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2023 else
2024 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2025 } else {
2026 if (insn & (1 << 20))
2027 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2028 else
2029 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2030 }
18c9b560
AZ
2031 gen_op_iwmmxt_movq_wRn_M0(wrd);
2032 gen_op_iwmmxt_set_mup();
2033 break;
d00584b7 2034 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
18c9b560
AZ
2035 wrd = (insn >> 12) & 0xf;
2036 rd0 = (insn >> 16) & 0xf;
2037 rd1 = (insn >> 0) & 0xf;
2038 gen_op_iwmmxt_movq_M0_wRn(rd0);
2039 if (insn & (1 << 21))
2040 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2041 else
2042 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2043 if (!(insn & (1 << 20))) {
e677137d
PB
2044 iwmmxt_load_reg(cpu_V1, wrd);
2045 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2046 }
2047 gen_op_iwmmxt_movq_wRn_M0(wrd);
2048 gen_op_iwmmxt_set_mup();
2049 break;
d00584b7 2050 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
18c9b560
AZ
2051 wrd = (insn >> 12) & 0xf;
2052 rd0 = (insn >> 16) & 0xf;
2053 rd1 = (insn >> 0) & 0xf;
2054 gen_op_iwmmxt_movq_M0_wRn(rd0);
2055 switch ((insn >> 22) & 3) {
2056 case 0:
2057 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2058 break;
2059 case 1:
2060 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2061 break;
2062 case 2:
2063 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2064 break;
2065 case 3:
2066 return 1;
2067 }
2068 gen_op_iwmmxt_movq_wRn_M0(wrd);
2069 gen_op_iwmmxt_set_mup();
2070 gen_op_iwmmxt_set_cup();
2071 break;
d00584b7 2072 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
18c9b560
AZ
2073 wrd = (insn >> 12) & 0xf;
2074 rd0 = (insn >> 16) & 0xf;
2075 rd1 = (insn >> 0) & 0xf;
2076 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2077 if (insn & (1 << 22)) {
2078 if (insn & (1 << 20))
2079 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2080 else
2081 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2082 } else {
2083 if (insn & (1 << 20))
2084 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2085 else
2086 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2087 }
18c9b560
AZ
2088 gen_op_iwmmxt_movq_wRn_M0(wrd);
2089 gen_op_iwmmxt_set_mup();
2090 gen_op_iwmmxt_set_cup();
2091 break;
d00584b7 2092 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
18c9b560
AZ
2093 wrd = (insn >> 12) & 0xf;
2094 rd0 = (insn >> 16) & 0xf;
2095 rd1 = (insn >> 0) & 0xf;
2096 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2097 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2098 tcg_gen_andi_i32(tmp, tmp, 7);
2099 iwmmxt_load_reg(cpu_V1, rd1);
2100 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2101 tcg_temp_free_i32(tmp);
18c9b560
AZ
2102 gen_op_iwmmxt_movq_wRn_M0(wrd);
2103 gen_op_iwmmxt_set_mup();
2104 break;
d00584b7 2105 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2106 if (((insn >> 6) & 3) == 3)
2107 return 1;
18c9b560
AZ
2108 rd = (insn >> 12) & 0xf;
2109 wrd = (insn >> 16) & 0xf;
da6b5335 2110 tmp = load_reg(s, rd);
18c9b560
AZ
2111 gen_op_iwmmxt_movq_M0_wRn(wrd);
2112 switch ((insn >> 6) & 3) {
2113 case 0:
da6b5335
FN
2114 tmp2 = tcg_const_i32(0xff);
2115 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2116 break;
2117 case 1:
da6b5335
FN
2118 tmp2 = tcg_const_i32(0xffff);
2119 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2120 break;
2121 case 2:
da6b5335
FN
2122 tmp2 = tcg_const_i32(0xffffffff);
2123 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2124 break;
da6b5335 2125 default: /* unreachable: ((insn >> 6) & 3) == 3 rejected above */
f764718d
RH
2126 tmp2 = NULL;
2127 tmp3 = NULL;
18c9b560 2128 }
da6b5335 2129 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2130 tcg_temp_free_i32(tmp3);
2131 tcg_temp_free_i32(tmp2);
7d1b0095 2132 tcg_temp_free_i32(tmp);
18c9b560
AZ
2133 gen_op_iwmmxt_movq_wRn_M0(wrd);
2134 gen_op_iwmmxt_set_mup();
2135 break;
d00584b7 2136 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
18c9b560
AZ
2137 rd = (insn >> 12) & 0xf;
2138 wrd = (insn >> 16) & 0xf;
da6b5335 2139 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2140 return 1;
2141 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2142 tmp = tcg_temp_new_i32();
18c9b560
AZ
2143 switch ((insn >> 22) & 3) {
2144 case 0:
da6b5335 2145 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2146 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2147 if (insn & 8) {
2148 tcg_gen_ext8s_i32(tmp, tmp);
2149 } else {
2150 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2151 }
2152 break;
2153 case 1:
da6b5335 2154 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2155 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2156 if (insn & 8) {
2157 tcg_gen_ext16s_i32(tmp, tmp);
2158 } else {
2159 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2160 }
2161 break;
2162 case 2:
da6b5335 2163 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2164 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2165 break;
18c9b560 2166 }
da6b5335 2167 store_reg(s, rd, tmp);
18c9b560 2168 break;
d00584b7 2169 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2170 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2171 return 1;
da6b5335 2172 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2173 switch ((insn >> 22) & 3) {
2174 case 0:
da6b5335 2175 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2176 break;
2177 case 1:
da6b5335 2178 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2179 break;
2180 case 2:
da6b5335 2181 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2182 break;
18c9b560 2183 }
da6b5335
FN
2184 tcg_gen_shli_i32(tmp, tmp, 28);
2185 gen_set_nzcv(tmp);
7d1b0095 2186 tcg_temp_free_i32(tmp);
18c9b560 2187 break;
d00584b7 2188 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2189 if (((insn >> 6) & 3) == 3)
2190 return 1;
18c9b560
AZ
2191 rd = (insn >> 12) & 0xf;
2192 wrd = (insn >> 16) & 0xf;
da6b5335 2193 tmp = load_reg(s, rd);
18c9b560
AZ
2194 switch ((insn >> 6) & 3) {
2195 case 0:
da6b5335 2196 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2197 break;
2198 case 1:
da6b5335 2199 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2200 break;
2201 case 2:
da6b5335 2202 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2203 break;
18c9b560 2204 }
7d1b0095 2205 tcg_temp_free_i32(tmp);
18c9b560
AZ
2206 gen_op_iwmmxt_movq_wRn_M0(wrd);
2207 gen_op_iwmmxt_set_mup();
2208 break;
d00584b7 2209 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2210 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2211 return 1;
da6b5335 2212 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2213 tmp2 = tcg_temp_new_i32();
da6b5335 2214 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2215 switch ((insn >> 22) & 3) {
2216 case 0:
2217 for (i = 0; i < 7; i ++) {
da6b5335
FN
2218 tcg_gen_shli_i32(tmp2, tmp2, 4);
2219 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2220 }
2221 break;
2222 case 1:
2223 for (i = 0; i < 3; i ++) {
da6b5335
FN
2224 tcg_gen_shli_i32(tmp2, tmp2, 8);
2225 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2226 }
2227 break;
2228 case 2:
da6b5335
FN
2229 tcg_gen_shli_i32(tmp2, tmp2, 16);
2230 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2231 break;
18c9b560 2232 }
da6b5335 2233 gen_set_nzcv(tmp);
7d1b0095
PM
2234 tcg_temp_free_i32(tmp2);
2235 tcg_temp_free_i32(tmp);
18c9b560 2236 break;
d00584b7 2237 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
18c9b560
AZ
2238 wrd = (insn >> 12) & 0xf;
2239 rd0 = (insn >> 16) & 0xf;
2240 gen_op_iwmmxt_movq_M0_wRn(rd0);
2241 switch ((insn >> 22) & 3) {
2242 case 0:
e677137d 2243 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2244 break;
2245 case 1:
e677137d 2246 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2247 break;
2248 case 2:
e677137d 2249 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2250 break;
2251 case 3:
2252 return 1;
2253 }
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 break;
d00584b7 2257 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2258 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2259 return 1;
da6b5335 2260 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2261 tmp2 = tcg_temp_new_i32();
da6b5335 2262 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2263 switch ((insn >> 22) & 3) {
2264 case 0:
2265 for (i = 0; i < 7; i ++) {
da6b5335
FN
2266 tcg_gen_shli_i32(tmp2, tmp2, 4);
2267 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2268 }
2269 break;
2270 case 1:
2271 for (i = 0; i < 3; i ++) {
da6b5335
FN
2272 tcg_gen_shli_i32(tmp2, tmp2, 8);
2273 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2274 }
2275 break;
2276 case 2:
da6b5335
FN
2277 tcg_gen_shli_i32(tmp2, tmp2, 16);
2278 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2279 break;
18c9b560 2280 }
da6b5335 2281 gen_set_nzcv(tmp);
7d1b0095
PM
2282 tcg_temp_free_i32(tmp2);
2283 tcg_temp_free_i32(tmp);
18c9b560 2284 break;
d00584b7 2285 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
18c9b560
AZ
2286 rd = (insn >> 12) & 0xf;
2287 rd0 = (insn >> 16) & 0xf;
da6b5335 2288 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2289 return 1;
2290 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2291 tmp = tcg_temp_new_i32();
18c9b560
AZ
2292 switch ((insn >> 22) & 3) {
2293 case 0:
da6b5335 2294 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2295 break;
2296 case 1:
da6b5335 2297 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2298 break;
2299 case 2:
da6b5335 2300 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2301 break;
18c9b560 2302 }
da6b5335 2303 store_reg(s, rd, tmp);
18c9b560 2304 break;
d00584b7 2305 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
18c9b560
AZ
2306 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2307 wrd = (insn >> 12) & 0xf;
2308 rd0 = (insn >> 16) & 0xf;
2309 rd1 = (insn >> 0) & 0xf;
2310 gen_op_iwmmxt_movq_M0_wRn(rd0);
2311 switch ((insn >> 22) & 3) {
2312 case 0:
2313 if (insn & (1 << 21))
2314 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2315 else
2316 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2317 break;
2318 case 1:
2319 if (insn & (1 << 21))
2320 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2321 else
2322 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2323 break;
2324 case 2:
2325 if (insn & (1 << 21))
2326 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2327 else
2328 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2329 break;
2330 case 3:
2331 return 1;
2332 }
2333 gen_op_iwmmxt_movq_wRn_M0(wrd);
2334 gen_op_iwmmxt_set_mup();
2335 gen_op_iwmmxt_set_cup();
2336 break;
d00584b7 2337 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
18c9b560
AZ
2338 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2339 wrd = (insn >> 12) & 0xf;
2340 rd0 = (insn >> 16) & 0xf;
2341 gen_op_iwmmxt_movq_M0_wRn(rd0);
2342 switch ((insn >> 22) & 3) {
2343 case 0:
2344 if (insn & (1 << 21))
2345 gen_op_iwmmxt_unpacklsb_M0();
2346 else
2347 gen_op_iwmmxt_unpacklub_M0();
2348 break;
2349 case 1:
2350 if (insn & (1 << 21))
2351 gen_op_iwmmxt_unpacklsw_M0();
2352 else
2353 gen_op_iwmmxt_unpackluw_M0();
2354 break;
2355 case 2:
2356 if (insn & (1 << 21))
2357 gen_op_iwmmxt_unpacklsl_M0();
2358 else
2359 gen_op_iwmmxt_unpacklul_M0();
2360 break;
2361 case 3:
2362 return 1;
2363 }
2364 gen_op_iwmmxt_movq_wRn_M0(wrd);
2365 gen_op_iwmmxt_set_mup();
2366 gen_op_iwmmxt_set_cup();
2367 break;
d00584b7 2368 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
18c9b560
AZ
2369 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2370 wrd = (insn >> 12) & 0xf;
2371 rd0 = (insn >> 16) & 0xf;
2372 gen_op_iwmmxt_movq_M0_wRn(rd0);
2373 switch ((insn >> 22) & 3) {
2374 case 0:
2375 if (insn & (1 << 21))
2376 gen_op_iwmmxt_unpackhsb_M0();
2377 else
2378 gen_op_iwmmxt_unpackhub_M0();
2379 break;
2380 case 1:
2381 if (insn & (1 << 21))
2382 gen_op_iwmmxt_unpackhsw_M0();
2383 else
2384 gen_op_iwmmxt_unpackhuw_M0();
2385 break;
2386 case 2:
2387 if (insn & (1 << 21))
2388 gen_op_iwmmxt_unpackhsl_M0();
2389 else
2390 gen_op_iwmmxt_unpackhul_M0();
2391 break;
2392 case 3:
2393 return 1;
2394 }
2395 gen_op_iwmmxt_movq_wRn_M0(wrd);
2396 gen_op_iwmmxt_set_mup();
2397 gen_op_iwmmxt_set_cup();
2398 break;
d00584b7 2399 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
18c9b560 2400 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2401 if (((insn >> 22) & 3) == 0)
2402 return 1;
18c9b560
AZ
2403 wrd = (insn >> 12) & 0xf;
2404 rd0 = (insn >> 16) & 0xf;
2405 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2406 tmp = tcg_temp_new_i32();
da6b5335 2407 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2408 tcg_temp_free_i32(tmp);
18c9b560 2409 return 1;
da6b5335 2410 }
18c9b560 2411 switch ((insn >> 22) & 3) {
18c9b560 2412 case 1:
477955bd 2413 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2414 break;
2415 case 2:
477955bd 2416 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2417 break;
2418 case 3:
477955bd 2419 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2420 break;
2421 }
7d1b0095 2422 tcg_temp_free_i32(tmp);
18c9b560
AZ
2423 gen_op_iwmmxt_movq_wRn_M0(wrd);
2424 gen_op_iwmmxt_set_mup();
2425 gen_op_iwmmxt_set_cup();
2426 break;
d00584b7 2427 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
18c9b560 2428 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2429 if (((insn >> 22) & 3) == 0)
2430 return 1;
18c9b560
AZ
2431 wrd = (insn >> 12) & 0xf;
2432 rd0 = (insn >> 16) & 0xf;
2433 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2434 tmp = tcg_temp_new_i32();
da6b5335 2435 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2436 tcg_temp_free_i32(tmp);
18c9b560 2437 return 1;
da6b5335 2438 }
18c9b560 2439 switch ((insn >> 22) & 3) {
18c9b560 2440 case 1:
477955bd 2441 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2442 break;
2443 case 2:
477955bd 2444 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2445 break;
2446 case 3:
477955bd 2447 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2448 break;
2449 }
7d1b0095 2450 tcg_temp_free_i32(tmp);
18c9b560
AZ
2451 gen_op_iwmmxt_movq_wRn_M0(wrd);
2452 gen_op_iwmmxt_set_mup();
2453 gen_op_iwmmxt_set_cup();
2454 break;
d00584b7 2455 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
18c9b560 2456 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2457 if (((insn >> 22) & 3) == 0)
2458 return 1;
18c9b560
AZ
2459 wrd = (insn >> 12) & 0xf;
2460 rd0 = (insn >> 16) & 0xf;
2461 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2462 tmp = tcg_temp_new_i32();
da6b5335 2463 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2464 tcg_temp_free_i32(tmp);
18c9b560 2465 return 1;
da6b5335 2466 }
18c9b560 2467 switch ((insn >> 22) & 3) {
18c9b560 2468 case 1:
477955bd 2469 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2470 break;
2471 case 2:
477955bd 2472 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2473 break;
2474 case 3:
477955bd 2475 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2476 break;
2477 }
7d1b0095 2478 tcg_temp_free_i32(tmp);
18c9b560
AZ
2479 gen_op_iwmmxt_movq_wRn_M0(wrd);
2480 gen_op_iwmmxt_set_mup();
2481 gen_op_iwmmxt_set_cup();
2482 break;
d00584b7 2483 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
18c9b560 2484 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2485 if (((insn >> 22) & 3) == 0)
2486 return 1;
18c9b560
AZ
2487 wrd = (insn >> 12) & 0xf;
2488 rd0 = (insn >> 16) & 0xf;
2489 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2490 tmp = tcg_temp_new_i32();
18c9b560 2491 switch ((insn >> 22) & 3) {
18c9b560 2492 case 1:
da6b5335 2493 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2494 tcg_temp_free_i32(tmp);
18c9b560 2495 return 1;
da6b5335 2496 }
477955bd 2497 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2498 break;
2499 case 2:
da6b5335 2500 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2501 tcg_temp_free_i32(tmp);
18c9b560 2502 return 1;
da6b5335 2503 }
477955bd 2504 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2505 break;
2506 case 3:
da6b5335 2507 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2508 tcg_temp_free_i32(tmp);
18c9b560 2509 return 1;
da6b5335 2510 }
477955bd 2511 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2512 break;
2513 }
7d1b0095 2514 tcg_temp_free_i32(tmp);
18c9b560
AZ
2515 gen_op_iwmmxt_movq_wRn_M0(wrd);
2516 gen_op_iwmmxt_set_mup();
2517 gen_op_iwmmxt_set_cup();
2518 break;
d00584b7 2519 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
18c9b560
AZ
2520 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2521 wrd = (insn >> 12) & 0xf;
2522 rd0 = (insn >> 16) & 0xf;
2523 rd1 = (insn >> 0) & 0xf;
2524 gen_op_iwmmxt_movq_M0_wRn(rd0);
2525 switch ((insn >> 22) & 3) {
2526 case 0:
2527 if (insn & (1 << 21))
2528 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2529 else
2530 gen_op_iwmmxt_minub_M0_wRn(rd1);
2531 break;
2532 case 1:
2533 if (insn & (1 << 21))
2534 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2535 else
2536 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2537 break;
2538 case 2:
2539 if (insn & (1 << 21))
2540 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2541 else
2542 gen_op_iwmmxt_minul_M0_wRn(rd1);
2543 break;
2544 case 3:
2545 return 1;
2546 }
2547 gen_op_iwmmxt_movq_wRn_M0(wrd);
2548 gen_op_iwmmxt_set_mup();
2549 break;
d00584b7 2550 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
18c9b560
AZ
2551 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2552 wrd = (insn >> 12) & 0xf;
2553 rd0 = (insn >> 16) & 0xf;
2554 rd1 = (insn >> 0) & 0xf;
2555 gen_op_iwmmxt_movq_M0_wRn(rd0);
2556 switch ((insn >> 22) & 3) {
2557 case 0:
2558 if (insn & (1 << 21))
2559 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2560 else
2561 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2562 break;
2563 case 1:
2564 if (insn & (1 << 21))
2565 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2566 else
2567 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2568 break;
2569 case 2:
2570 if (insn & (1 << 21))
2571 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2572 else
2573 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2574 break;
2575 case 3:
2576 return 1;
2577 }
2578 gen_op_iwmmxt_movq_wRn_M0(wrd);
2579 gen_op_iwmmxt_set_mup();
2580 break;
d00584b7 2581 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
18c9b560
AZ
2582 case 0x402: case 0x502: case 0x602: case 0x702:
2583 wrd = (insn >> 12) & 0xf;
2584 rd0 = (insn >> 16) & 0xf;
2585 rd1 = (insn >> 0) & 0xf;
2586 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2587 tmp = tcg_const_i32((insn >> 20) & 3);
2588 iwmmxt_load_reg(cpu_V1, rd1);
2589 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2590 tcg_temp_free_i32(tmp);
18c9b560
AZ
2591 gen_op_iwmmxt_movq_wRn_M0(wrd);
2592 gen_op_iwmmxt_set_mup();
2593 break;
d00584b7 2594 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
18c9b560
AZ
2595 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2596 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2597 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2598 wrd = (insn >> 12) & 0xf;
2599 rd0 = (insn >> 16) & 0xf;
2600 rd1 = (insn >> 0) & 0xf;
2601 gen_op_iwmmxt_movq_M0_wRn(rd0);
2602 switch ((insn >> 20) & 0xf) {
2603 case 0x0:
2604 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2605 break;
2606 case 0x1:
2607 gen_op_iwmmxt_subub_M0_wRn(rd1);
2608 break;
2609 case 0x3:
2610 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2611 break;
2612 case 0x4:
2613 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2614 break;
2615 case 0x5:
2616 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2617 break;
2618 case 0x7:
2619 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2620 break;
2621 case 0x8:
2622 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2623 break;
2624 case 0x9:
2625 gen_op_iwmmxt_subul_M0_wRn(rd1);
2626 break;
2627 case 0xb:
2628 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2629 break;
2630 default:
2631 return 1;
2632 }
2633 gen_op_iwmmxt_movq_wRn_M0(wrd);
2634 gen_op_iwmmxt_set_mup();
2635 gen_op_iwmmxt_set_cup();
2636 break;
d00584b7 2637 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
18c9b560
AZ
2638 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2639 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2640 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2641 wrd = (insn >> 12) & 0xf;
2642 rd0 = (insn >> 16) & 0xf;
2643 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2644 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2645 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2646 tcg_temp_free_i32(tmp);
18c9b560
AZ
2647 gen_op_iwmmxt_movq_wRn_M0(wrd);
2648 gen_op_iwmmxt_set_mup();
2649 gen_op_iwmmxt_set_cup();
2650 break;
d00584b7 2651 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
18c9b560
AZ
2652 case 0x418: case 0x518: case 0x618: case 0x718:
2653 case 0x818: case 0x918: case 0xa18: case 0xb18:
2654 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2655 wrd = (insn >> 12) & 0xf;
2656 rd0 = (insn >> 16) & 0xf;
2657 rd1 = (insn >> 0) & 0xf;
2658 gen_op_iwmmxt_movq_M0_wRn(rd0);
2659 switch ((insn >> 20) & 0xf) {
2660 case 0x0:
2661 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2662 break;
2663 case 0x1:
2664 gen_op_iwmmxt_addub_M0_wRn(rd1);
2665 break;
2666 case 0x3:
2667 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2668 break;
2669 case 0x4:
2670 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2671 break;
2672 case 0x5:
2673 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2674 break;
2675 case 0x7:
2676 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2677 break;
2678 case 0x8:
2679 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2680 break;
2681 case 0x9:
2682 gen_op_iwmmxt_addul_M0_wRn(rd1);
2683 break;
2684 case 0xb:
2685 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2686 break;
2687 default:
2688 return 1;
2689 }
2690 gen_op_iwmmxt_movq_wRn_M0(wrd);
2691 gen_op_iwmmxt_set_mup();
2692 gen_op_iwmmxt_set_cup();
2693 break;
d00584b7 2694 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
18c9b560
AZ
2695 case 0x408: case 0x508: case 0x608: case 0x708:
2696 case 0x808: case 0x908: case 0xa08: case 0xb08:
2697 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2698 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2699 return 1;
18c9b560
AZ
2700 wrd = (insn >> 12) & 0xf;
2701 rd0 = (insn >> 16) & 0xf;
2702 rd1 = (insn >> 0) & 0xf;
2703 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2704 switch ((insn >> 22) & 3) {
18c9b560
AZ
2705 case 1:
2706 if (insn & (1 << 21))
2707 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2708 else
2709 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2710 break;
2711 case 2:
2712 if (insn & (1 << 21))
2713 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2714 else
2715 gen_op_iwmmxt_packul_M0_wRn(rd1);
2716 break;
2717 case 3:
2718 if (insn & (1 << 21))
2719 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2720 else
2721 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2722 break;
2723 }
2724 gen_op_iwmmxt_movq_wRn_M0(wrd);
2725 gen_op_iwmmxt_set_mup();
2726 gen_op_iwmmxt_set_cup();
2727 break;
2728 case 0x201: case 0x203: case 0x205: case 0x207:
2729 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2730 case 0x211: case 0x213: case 0x215: case 0x217:
2731 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2732 wrd = (insn >> 5) & 0xf;
2733 rd0 = (insn >> 12) & 0xf;
2734 rd1 = (insn >> 0) & 0xf;
2735 if (rd0 == 0xf || rd1 == 0xf)
2736 return 1;
2737 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2738 tmp = load_reg(s, rd0);
2739 tmp2 = load_reg(s, rd1);
18c9b560 2740 switch ((insn >> 16) & 0xf) {
d00584b7 2741 case 0x0: /* TMIA */
da6b5335 2742 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2743 break;
d00584b7 2744 case 0x8: /* TMIAPH */
da6b5335 2745 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2746 break;
d00584b7 2747 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2748 if (insn & (1 << 16))
da6b5335 2749 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2750 if (insn & (1 << 17))
da6b5335
FN
2751 tcg_gen_shri_i32(tmp2, tmp2, 16);
2752 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2753 break;
2754 default:
7d1b0095
PM
2755 tcg_temp_free_i32(tmp2);
2756 tcg_temp_free_i32(tmp);
18c9b560
AZ
2757 return 1;
2758 }
7d1b0095
PM
2759 tcg_temp_free_i32(tmp2);
2760 tcg_temp_free_i32(tmp);
18c9b560
AZ
2761 gen_op_iwmmxt_movq_wRn_M0(wrd);
2762 gen_op_iwmmxt_set_mup();
2763 break;
2764 default:
2765 return 1;
2766 }
2767
2768 return 0;
2769}
2770
a1c7273b 2771/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2772 (i.e. an undefined instruction). */
7dcc1f89 2773static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2774{
2775 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2776 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2777
2778 if ((insn & 0x0ff00f10) == 0x0e200010) {
2779 /* Multiply with Internal Accumulate Format */
2780 rd0 = (insn >> 12) & 0xf;
2781 rd1 = insn & 0xf;
2782 acc = (insn >> 5) & 7;
2783
2784 if (acc != 0)
2785 return 1;
2786
3a554c0f
FN
2787 tmp = load_reg(s, rd0);
2788 tmp2 = load_reg(s, rd1);
18c9b560 2789 switch ((insn >> 16) & 0xf) {
d00584b7 2790 case 0x0: /* MIA */
3a554c0f 2791 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2792 break;
d00584b7 2793 case 0x8: /* MIAPH */
3a554c0f 2794 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2795 break;
d00584b7
PM
2796 case 0xc: /* MIABB */
2797 case 0xd: /* MIABT */
2798 case 0xe: /* MIATB */
2799 case 0xf: /* MIATT */
18c9b560 2800 if (insn & (1 << 16))
3a554c0f 2801 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2802 if (insn & (1 << 17))
3a554c0f
FN
2803 tcg_gen_shri_i32(tmp2, tmp2, 16);
2804 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2805 break;
2806 default:
2807 return 1;
2808 }
7d1b0095
PM
2809 tcg_temp_free_i32(tmp2);
2810 tcg_temp_free_i32(tmp);
18c9b560
AZ
2811
2812 gen_op_iwmmxt_movq_wRn_M0(acc);
2813 return 0;
2814 }
2815
2816 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2817 /* Internal Accumulator Access Format */
2818 rdhi = (insn >> 16) & 0xf;
2819 rdlo = (insn >> 12) & 0xf;
2820 acc = insn & 7;
2821
2822 if (acc != 0)
2823 return 1;
2824
d00584b7 2825 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2826 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2827 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2828 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2829 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2830 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
d00584b7 2831 } else { /* MAR */
3a554c0f
FN
2832 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2833 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2834 }
2835 return 0;
2836 }
2837
2838 return 1;
2839}
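/*
 * In the MRA path above, rdhi is masked with (1 << (40 - 32)) - 1
 * because the XScale internal accumulator acc0 is 40 bits wide, so
 * only the low 8 bits of the high word are defined.
 */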
2840
9ee6e8bb
PB
2841#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2842#define VFP_SREG(insn, bigbit, smallbit) \
2843 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2844#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
d614a513 2845 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
9ee6e8bb
PB
2846 reg = (((insn) >> (bigbit)) & 0x0f) \
2847 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2848 } else { \
2849 if (insn & (1 << (smallbit))) \
2850 return 1; \
2851 reg = ((insn) >> (bigbit)) & 0x0f; \
2852 }} while (0)
2853
2854#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2855#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2856#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2857#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2858#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2859#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
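/*
 * Worked example: on a VFP3-capable core, VFP_DREG_D() assembles a
 * 5-bit double register index from insn[15:12] (low four bits) and
 * insn[22] (top bit), so bits [15:12] = 0b0101 with bit 22 set names
 * d21. Without VFP3 the top bit must be zero or the insn is rejected.
 */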
2860
39d5492a 2861static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2862{
39d5492a 2863 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2864 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2865 tcg_gen_shli_i32(tmp, var, 16);
2866 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2867 tcg_temp_free_i32(tmp);
ad69471c
PB
2868}
2869
39d5492a 2870static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2871{
39d5492a 2872 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2873 tcg_gen_andi_i32(var, var, 0xffff0000);
2874 tcg_gen_shri_i32(tmp, var, 16);
2875 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2876 tcg_temp_free_i32(tmp);
ad69471c
PB
2877}
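/*
 * e.g. gen_neon_dup_low16() maps 0xAAAABBBB to 0xBBBBBBBB and
 * gen_neon_dup_high16() maps it to 0xAAAAAAAA, replicating one
 * 16-bit lane across both halves of the word.
 */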
2878
06db8196
PM
2879/*
2880 * Disassemble a VFP instruction. Returns nonzero if an error occurred
2881 * (i.e. an undefined instruction).
2882 */
7dcc1f89 2883static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95 2884{
d614a513 2885 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 2886 return 1;
d614a513 2887 }
40f137e1 2888
78e138bc
PM
2889 /*
2890 * If the decodetree decoder handles this insn it will always
2891 * emit code to either execute the insn or generate an appropriate
2892 * exception; so we don't need to ever return non-zero to tell
2893 * the calling code to emit an UNDEF exception.
2894 */
2895 if (extract32(insn, 28, 4) == 0xf) {
2896 if (disas_vfp_uncond(s, insn)) {
2897 return 0;
2898 }
2899 } else {
2900 if (disas_vfp(s, insn)) {
2901 return 0;
2902 }
2903 }
3111bfc2
PM
2904 /* If the decodetree decoder didn't handle this insn, it must be UNDEF */
2905 return 1;
b7bcbe95
FB
2906}
2907
90aa39a1 2908static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 2909{
90aa39a1 2910#ifndef CONFIG_USER_ONLY
dcba3a8d 2911 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
90aa39a1
SF
2912 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
2913#else
2914 return true;
2915#endif
2916}
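/*
 * Direct TB chaining is only safe when the destination stays within
 * the guest page of either the TB start or the current insn; a
 * cross-page goto_tb could otherwise keep jumping into stale code
 * after the destination page is remapped, hence the fallback to
 * gen_goto_ptr() below.
 */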
6e256c93 2917
8a6b28c7
EC
2918static void gen_goto_ptr(void)
2919{
7f11636d 2920 tcg_gen_lookup_and_goto_ptr();
8a6b28c7
EC
2921}
2922
4cae8f56
AB
2923/* This will end the TB but doesn't guarantee we'll return to
2924 * cpu_loop_exec. Any live exit_requests will be processed as we
2925 * enter the next TB.
2926 */
8a6b28c7 2927static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
90aa39a1
SF
2928{
2929 if (use_goto_tb(s, dest)) {
57fec1fe 2930 tcg_gen_goto_tb(n);
eaed129d 2931 gen_set_pc_im(s, dest);
07ea28b4 2932 tcg_gen_exit_tb(s->base.tb, n);
6e256c93 2933 } else {
eaed129d 2934 gen_set_pc_im(s, dest);
8a6b28c7 2935 gen_goto_ptr();
6e256c93 2936 }
dcba3a8d 2937 s->base.is_jmp = DISAS_NORETURN;
c53be334
FB
2938}
2939
8aaca4c0
FB
2940static inline void gen_jmp (DisasContext *s, uint32_t dest)
2941{
b636649f 2942 if (unlikely(is_singlestepping(s))) {
8aaca4c0 2943 /* An indirect jump so that we still trigger the debug exception. */
5899f386 2944 if (s->thumb)
d9ba4830
PB
2945 dest |= 1;
2946 gen_bx_im(s, dest);
8aaca4c0 2947 } else {
6e256c93 2948 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
2949 }
2950}
2951
39d5492a 2952static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 2953{
ee097184 2954 if (x)
d9ba4830 2955 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 2956 else
d9ba4830 2957 gen_sxth(t0);
ee097184 2958 if (y)
d9ba4830 2959 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 2960 else
d9ba4830
PB
2961 gen_sxth(t1);
2962 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
2963}
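/*
 * gen_mulxy() implements the SMULxy-style operand selection: each
 * input is either sign-extended from its low halfword (x or y = 0)
 * or arithmetically shifted down to its high halfword (x or y = 1)
 * before the 32x32-bit multiply.
 */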
2964
2965/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
2966static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
2967{
b5ff1b31
FB
2968 uint32_t mask;
2969
2970 mask = 0;
2971 if (flags & (1 << 0))
2972 mask |= 0xff;
2973 if (flags & (1 << 1))
2974 mask |= 0xff00;
2975 if (flags & (1 << 2))
2976 mask |= 0xff0000;
2977 if (flags & (1 << 3))
2978 mask |= 0xff000000;
9ee6e8bb 2979
2ae23e75 2980 /* Mask out undefined bits. */
9ee6e8bb 2981 mask &= ~CPSR_RESERVED;
d614a513 2982 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 2983 mask &= ~CPSR_T;
d614a513
PM
2984 }
2985 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 2986 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
2987 }
2988 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 2989 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
2990 }
2991 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 2992 mask &= ~CPSR_IT;
d614a513 2993 }
4051e12c
PM
2994 /* Mask out execution state and reserved bits. */
2995 if (!spsr) {
2996 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
2997 }
b5ff1b31
FB
2998 /* Mask out privileged bits. */
2999 if (IS_USER(s))
9ee6e8bb 3000 mask &= CPSR_USER;
b5ff1b31
FB
3001 return mask;
3002}
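/*
 * Example: an MSR with field mask "cf" (flags = 0b1001) starts from
 * mask 0xff0000ff here; the feature tests and the
 * CPSR_RESERVED/CPSR_EXEC clearing then narrow that to the bits this
 * CPSR/SPSR write is actually allowed to touch.
 */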
3003
2fbac54b 3004/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3005static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3006{
39d5492a 3007 TCGv_i32 tmp;
b5ff1b31
FB
3008 if (spsr) {
3009 /* ??? This is also undefined in system mode. */
3010 if (IS_USER(s))
3011 return 1;
d9ba4830
PB
3012
3013 tmp = load_cpu_field(spsr);
3014 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3015 tcg_gen_andi_i32(t0, t0, mask);
3016 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3017 store_cpu_field(tmp, spsr);
b5ff1b31 3018 } else {
2fbac54b 3019 gen_set_cpsr(t0, mask);
b5ff1b31 3020 }
7d1b0095 3021 tcg_temp_free_i32(t0);
b5ff1b31
FB
3022 gen_lookup_tb(s);
3023 return 0;
3024}
3025
2fbac54b
FN
3026/* Returns nonzero if access to the PSR is not permitted. */
3027static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3028{
39d5492a 3029 TCGv_i32 tmp;
7d1b0095 3030 tmp = tcg_temp_new_i32();
2fbac54b
FN
3031 tcg_gen_movi_i32(tmp, val);
3032 return gen_set_psr(s, mask, spsr, tmp);
3033}
3034
8bfd0550
PM
3035static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
3036 int *tgtmode, int *regno)
3037{
3038 /* Decode the r and sysm fields of MSR/MRS banked accesses into
3039 * the target mode and register number, and identify the various
3040 * unpredictable cases.
3041 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
3042 * + executed in user mode
3043 * + using R15 as the src/dest register
3044 * + accessing an unimplemented register
3045 * + accessing a register that's inaccessible at current PL/security state*
3046 * + accessing a register that you could access with a different insn
3047 * We choose to UNDEF in all these cases.
3048 * Since we don't know which of the various AArch32 modes we are in
3049 * we have to defer some checks to runtime.
3050 * Accesses to Monitor mode registers from Secure EL1 (which implies
3051 * that EL3 is AArch64) must trap to EL3.
3052 *
3053 * If the access checks fail this function will emit code to take
3054 * an exception and return false. Otherwise it will return true,
3055 * and set *tgtmode and *regno appropriately.
3056 */
3057 int exc_target = default_exception_el(s);
3058
3059 /* These instructions are present only in ARMv8, or in ARMv7 with the
3060 * Virtualization Extensions.
3061 */
3062 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
3063 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
3064 goto undef;
3065 }
3066
3067 if (IS_USER(s) || rn == 15) {
3068 goto undef;
3069 }
3070
3071 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
3072 * of registers into (r, sysm).
3073 */
3074 if (r) {
3075 /* SPSRs for other modes */
3076 switch (sysm) {
3077 case 0xe: /* SPSR_fiq */
3078 *tgtmode = ARM_CPU_MODE_FIQ;
3079 break;
3080 case 0x10: /* SPSR_irq */
3081 *tgtmode = ARM_CPU_MODE_IRQ;
3082 break;
3083 case 0x12: /* SPSR_svc */
3084 *tgtmode = ARM_CPU_MODE_SVC;
3085 break;
3086 case 0x14: /* SPSR_abt */
3087 *tgtmode = ARM_CPU_MODE_ABT;
3088 break;
3089 case 0x16: /* SPSR_und */
3090 *tgtmode = ARM_CPU_MODE_UND;
3091 break;
3092 case 0x1c: /* SPSR_mon */
3093 *tgtmode = ARM_CPU_MODE_MON;
3094 break;
3095 case 0x1e: /* SPSR_hyp */
3096 *tgtmode = ARM_CPU_MODE_HYP;
3097 break;
3098 default: /* unallocated */
3099 goto undef;
3100 }
3101 /* We arbitrarily assign SPSR a register number of 16. */
3102 *regno = 16;
3103 } else {
3104 /* general purpose registers for other modes */
3105 switch (sysm) {
3106 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
3107 *tgtmode = ARM_CPU_MODE_USR;
3108 *regno = sysm + 8;
3109 break;
3110 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
3111 *tgtmode = ARM_CPU_MODE_FIQ;
3112 *regno = sysm;
3113 break;
3114 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
3115 *tgtmode = ARM_CPU_MODE_IRQ;
3116 *regno = sysm & 1 ? 13 : 14;
3117 break;
3118 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
3119 *tgtmode = ARM_CPU_MODE_SVC;
3120 *regno = sysm & 1 ? 13 : 14;
3121 break;
3122 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
3123 *tgtmode = ARM_CPU_MODE_ABT;
3124 *regno = sysm & 1 ? 13 : 14;
3125 break;
3126 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
3127 *tgtmode = ARM_CPU_MODE_UND;
3128 *regno = sysm & 1 ? 13 : 14;
3129 break;
3130 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
3131 *tgtmode = ARM_CPU_MODE_MON;
3132 *regno = sysm & 1 ? 13 : 14;
3133 break;
3134 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
3135 *tgtmode = ARM_CPU_MODE_HYP;
3136 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
3137 *regno = sysm & 1 ? 13 : 17;
3138 break;
3139 default: /* unallocated */
3140 goto undef;
3141 }
3142 }
3143
3144 /* Catch the 'accessing inaccessible register' cases we can detect
3145 * at translate time.
3146 */
3147 switch (*tgtmode) {
3148 case ARM_CPU_MODE_MON:
3149 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
3150 goto undef;
3151 }
3152 if (s->current_el == 1) {
3153 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
3154 * then accesses to Mon registers trap to EL3
3155 */
3156 exc_target = 3;
3157 goto undef;
3158 }
3159 break;
3160 case ARM_CPU_MODE_HYP:
aec4dd09
PM
3161 /*
3162 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
3163 * (and so we can forbid accesses from EL2 or below). elr_hyp
3164 * can be accessed also from Hyp mode, so forbid accesses from
3165 * EL0 or EL1.
8bfd0550 3166 */
aec4dd09
PM
3167 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
3168 (s->current_el < 3 && *regno != 17)) {
8bfd0550
PM
3169 goto undef;
3170 }
3171 break;
3172 default:
3173 break;
3174 }
3175
3176 return true;
3177
3178undef:
3179 /* If we get here then some access check did not pass */
3180 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
3181 return false;
3182}
3183
3184static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
3185{
3186 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
3187 int tgtmode = 0, regno = 0;
3188
3189 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
3190 return;
3191 }
3192
3193 /* Sync state because msr_banked() can raise exceptions */
3194 gen_set_condexec(s);
43722a6d 3195 gen_set_pc_im(s, s->pc_curr);
8bfd0550
PM
3196 tcg_reg = load_reg(s, rn);
3197 tcg_tgtmode = tcg_const_i32(tgtmode);
3198 tcg_regno = tcg_const_i32(regno);
3199 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
3200 tcg_temp_free_i32(tcg_tgtmode);
3201 tcg_temp_free_i32(tcg_regno);
3202 tcg_temp_free_i32(tcg_reg);
dcba3a8d 3203 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
3204}
3205
3206static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
3207{
3208 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
3209 int tgtmode = 0, regno = 0;
3210
3211 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
3212 return;
3213 }
3214
3215 /* Sync state because mrs_banked() can raise exceptions */
3216 gen_set_condexec(s);
43722a6d 3217 gen_set_pc_im(s, s->pc_curr);
8bfd0550
PM
3218 tcg_reg = tcg_temp_new_i32();
3219 tcg_tgtmode = tcg_const_i32(tgtmode);
3220 tcg_regno = tcg_const_i32(regno);
3221 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
3222 tcg_temp_free_i32(tcg_tgtmode);
3223 tcg_temp_free_i32(tcg_regno);
3224 store_reg(s, rn, tcg_reg);
dcba3a8d 3225 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
3226}
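/*
 * Both banked accessors above sync the condexec state and PC before
 * calling the helper (which can raise an exception) and finish with
 * DISAS_UPDATE, ending the TB because CPU state may have changed
 * under the translator's feet.
 */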
3227
fb0e8e79
PM
3228/* Store value to PC as for an exception return (i.e. don't
3229 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
3230 * will do the masking based on the new value of the Thumb bit.
3231 */
3232static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3233{
fb0e8e79
PM
3234 tcg_gen_mov_i32(cpu_R[15], pc);
3235 tcg_temp_free_i32(pc);
b5ff1b31
FB
3236}
3237
b0109805 3238/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 3239static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 3240{
fb0e8e79
PM
3241 store_pc_exc_ret(s, pc);
3242 /* The cpsr_write_eret helper will mask the low bits of PC
3243 * appropriately depending on the new Thumb bit, so it must
3244 * be called after storing the new PC.
3245 */
e69ad9df
AL
3246 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
3247 gen_io_start();
3248 }
235ea1f5 3249 gen_helper_cpsr_write_eret(cpu_env, cpsr);
e69ad9df
AL
3250 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
3251 gen_io_end();
3252 }
7d1b0095 3253 tcg_temp_free_i32(cpsr);
b29fd33d 3254 /* Must exit loop to check unmasked IRQs */
dcba3a8d 3255 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb 3256}
3b46e624 3257
fb0e8e79
PM
3258/* Generate an old-style exception return. Marks pc as dead. */
3259static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
3260{
3261 gen_rfe(s, pc, load_cpu_field(spsr));
3262}
3263
c22edfeb
AB
3264/*
3265 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
3266 * only call the helper when running single-threaded TCG code to ensure
3267 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
3268 * just skip this instruction. Currently the SEV/SEVL instructions
3269 * which are *one* of many ways to wake the CPU from WFE are not
3270 * implemented so we can't sleep like WFI does.
3271 */
9ee6e8bb
PB
3272static void gen_nop_hint(DisasContext *s, int val)
3273{
3274 switch (val) {
2399d4e7
EC
3275 /* When running in MTTCG we don't generate jumps to the yield and
3276 * WFE helpers as it won't affect the scheduling of other vCPUs.
3277 * If we wanted to more completely model WFE/SEV so we don't busy
3278 * spin unnecessarily we would need to do something more involved.
3279 */
c87e5a61 3280 case 1: /* yield */
2399d4e7 3281 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 3282 gen_set_pc_im(s, s->pc);
dcba3a8d 3283 s->base.is_jmp = DISAS_YIELD;
c22edfeb 3284 }
c87e5a61 3285 break;
9ee6e8bb 3286 case 3: /* wfi */
eaed129d 3287 gen_set_pc_im(s, s->pc);
dcba3a8d 3288 s->base.is_jmp = DISAS_WFI;
9ee6e8bb
PB
3289 break;
3290 case 2: /* wfe */
2399d4e7 3291 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 3292 gen_set_pc_im(s, s->pc);
dcba3a8d 3293 s->base.is_jmp = DISAS_WFE;
c22edfeb 3294 }
72c1d3af 3295 break;
9ee6e8bb 3296 case 4: /* sev */
12b10571
MR
3297 case 5: /* sevl */
3298 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
3299 default: /* nop */
3300 break;
3301 }
3302}
99c475ab 3303
ad69471c 3304#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3305
39d5492a 3306static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3307{
3308 switch (size) {
dd8fbd78
FN
3309 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3310 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3311 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3312 default: abort();
9ee6e8bb 3313 }
9ee6e8bb
PB
3314}
3315
39d5492a 3316static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3317{
3318 switch (size) {
dd8fbd78
FN
3319 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3320 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3321 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3322 default: return;
3323 }
3324}
3325
3326/* 32-bit pairwise ops end up the same as the elementwise versions. */
9ecd3c5c
RH
3327#define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
3328#define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
3329#define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
3330#define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
ad69471c 3331
ad69471c
PB
3332#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3333 switch ((size << 1) | u) { \
3334 case 0: \
dd8fbd78 3335 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3336 break; \
3337 case 1: \
dd8fbd78 3338 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3339 break; \
3340 case 2: \
dd8fbd78 3341 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3342 break; \
3343 case 3: \
dd8fbd78 3344 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3345 break; \
3346 case 4: \
dd8fbd78 3347 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3348 break; \
3349 case 5: \
dd8fbd78 3350 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3351 break; \
3352 default: return 1; \
3353 }} while (0)
9ee6e8bb
PB
3354
3355#define GEN_NEON_INTEGER_OP(name) do { \
3356 switch ((size << 1) | u) { \
ad69471c 3357 case 0: \
dd8fbd78 3358 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3359 break; \
3360 case 1: \
dd8fbd78 3361 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3362 break; \
3363 case 2: \
dd8fbd78 3364 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3365 break; \
3366 case 3: \
dd8fbd78 3367 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3368 break; \
3369 case 4: \
dd8fbd78 3370 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3371 break; \
3372 case 5: \
dd8fbd78 3373 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3374 break; \
9ee6e8bb
PB
3375 default: return 1; \
3376 }} while (0)
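/*
 * Illustrative expansion: with size = 1 and u = 0,
 * GEN_NEON_INTEGER_OP(pmax) selects case 2 and emits
 * gen_helper_neon_pmax_s16(tmp, tmp, tmp2); the (size << 1) | u key
 * simply indexes the signed/unsigned 8-, 16- and 32-bit helper
 * variants.
 */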
3377
39d5492a 3378static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 3379{
39d5492a 3380 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
3381 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3382 return tmp;
9ee6e8bb
PB
3383}
3384
39d5492a 3385static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 3386{
dd8fbd78 3387 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3388 tcg_temp_free_i32(var);
9ee6e8bb
PB
3389}
3390
39d5492a 3391static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 3392{
39d5492a 3393 TCGv_i32 tmp;
9ee6e8bb 3394 if (size == 1) {
0fad6efc
PM
3395 tmp = neon_load_reg(reg & 7, reg >> 4);
3396 if (reg & 8) {
dd8fbd78 3397 gen_neon_dup_high16(tmp);
0fad6efc
PM
3398 } else {
3399 gen_neon_dup_low16(tmp);
dd8fbd78 3400 }
0fad6efc
PM
3401 } else {
3402 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3403 }
dd8fbd78 3404 return tmp;
9ee6e8bb
PB
3405}
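/*
 * For 16-bit scalars the lane loaded above is replicated into both
 * halves of the 32-bit temp (dup_high16/dup_low16), so the ordinary
 * elementwise helpers can be reused for the by-scalar forms.
 */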
3406
02acedf9 3407static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3408{
b13708bb
RH
3409 TCGv_ptr pd, pm;
3410
600b828c 3411 if (!q && size == 2) {
02acedf9
PM
3412 return 1;
3413 }
b13708bb
RH
3414 pd = vfp_reg_ptr(true, rd);
3415 pm = vfp_reg_ptr(true, rm);
02acedf9
PM
3416 if (q) {
3417 switch (size) {
3418 case 0:
b13708bb 3419 gen_helper_neon_qunzip8(pd, pm);
02acedf9
PM
3420 break;
3421 case 1:
b13708bb 3422 gen_helper_neon_qunzip16(pd, pm);
02acedf9
PM
3423 break;
3424 case 2:
b13708bb 3425 gen_helper_neon_qunzip32(pd, pm);
02acedf9
PM
3426 break;
3427 default:
3428 abort();
3429 }
3430 } else {
3431 switch (size) {
3432 case 0:
b13708bb 3433 gen_helper_neon_unzip8(pd, pm);
02acedf9
PM
3434 break;
3435 case 1:
b13708bb 3436 gen_helper_neon_unzip16(pd, pm);
02acedf9
PM
3437 break;
3438 default:
3439 abort();
3440 }
3441 }
b13708bb
RH
3442 tcg_temp_free_ptr(pd);
3443 tcg_temp_free_ptr(pm);
02acedf9 3444 return 0;
19457615
FN
3445}
3446
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_zip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}

static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}


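/*
 * Worked example for gen_neon_trn_u8 above (illustrative): with
 * t0 = 0x44332211 and t1 = 0x88776655 on entry, the shift/mask/or
 * sequence leaves t0 = 0x33771155 and t1 = 0x44882266, i.e. each
 * output word pairs one byte of t0 with the corresponding byte of t1,
 * the per-byte 2x2 transpose step used when expanding VTRN.8.
 */
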
static struct {
    int nregs;
    int interleave;
    int spacing;
} const neon_ls_element_type[11] = {
    {1, 4, 1},
    {1, 4, 2},
    {4, 1, 1},
    {2, 2, 2},
    {1, 3, 1},
    {1, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {1, 2, 1},
    {1, 2, 2},
    {2, 1, 1}
};

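/*
 * Reading aid (illustrative): the table above is indexed by the 'type'
 * field of the instruction, bits [11:8], extracted as 'op' in
 * disas_neon_ls_insn() below.  For example entry 7, {1, 1, 1}, is a
 * plain one-register VLD1/VST1, while entry 0, {1, 4, 1}, describes
 * four interleaved registers at consecutive D register numbers
 * (VLD4/VST4 with spacing 1).
 */
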
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int load;
    int n;
    int vec_size;
    int mmu_idx;
    TCGMemOp endian;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        return 1;
    }
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    endian = s->be_data;
    mmu_idx = get_mem_index(s);
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10) {
            return 1;
        }
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1) {
            return 1;
        }
        /* For our purposes, bytes are always little-endian.  */
        if (size == 0) {
            endian = MO_LE;
        }
        /* Consecutive little-endian elements from a single register
         * can be promoted to a larger little-endian operation.
         */
        if (interleave == 1 && endian == MO_LE) {
            size = 3;
        }
        tmp64 = tcg_temp_new_i64();
        addr = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(1 << size);
        load_reg_var(s, addr, rn);
        for (reg = 0; reg < nregs; reg++) {
            for (n = 0; n < 8 >> size; n++) {
                int xs;
                for (xs = 0; xs < interleave; xs++) {
                    int tt = rd + reg + spacing * xs;

                    if (load) {
                        gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
                        neon_store_element64(tt, n, size, tmp64);
                    } else {
                        neon_load_element64(tmp64, tt, n, size);
                        gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
                    }
                    tcg_gen_add_i32(addr, addr, tmp2);
                }
            }
        }
        tcg_temp_free_i32(addr);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i64(tmp64);
        stride = nregs * interleave * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);

            /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
             * VLD2/3/4 to all lanes: bit 5 indicates register stride.
             */
            stride = (insn & (1 << 5)) ? 2 : 1;
            vec_size = nregs == 1 ? stride * 8 : 8;

            tmp = tcg_temp_new_i32();
            for (reg = 0; reg < nregs; reg++) {
                gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                                s->be_data | size);
                if ((rd & 1) && vec_size == 16) {
                    /* We cannot write 16 bytes at once because the
                     * destination is unaligned.
                     */
                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
                                         8, 8, tmp);
                    tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
                                     neon_reg_offset(rd, 0), 8, 8);
                } else {
                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
                                         vec_size, vec_size, tmp);
                }
                tcg_gen_addi_i32(addr, addr, 1 << size);
                rd += stride;
            }
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            int reg_idx;
            switch (size) {
            case 0:
                reg_idx = (insn >> 5) & 7;
                stride = 1;
                break;
            case 1:
                reg_idx = (insn >> 6) & 3;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                reg_idx = (insn >> 7) & 1;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            tmp = tcg_temp_new_i32();
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                    neon_store_element(rd, reg_idx, size, tmp);
                } else { /* Store */
                    neon_load_element(tmp, rd, reg_idx, size);
                    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            tcg_temp_free_i32(tmp);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}

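/*
 * Illustrative note on the writeback handling above: rm == 15 encodes
 * "no writeback", rm == 13 encodes post-increment of the base register
 * by the transfer size ('stride' bytes, computed per addressing form),
 * and any other rm post-increments the base by that index register.
 */
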
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters,
       so free them here.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

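/*
 * Illustrative note: these are widening multiplies, so the full
 * product always fits in the double-width destination; e.g. in the
 * s8 case, -128 * -128 = 16384 needs all 16 result bits.  For
 * size == 2 the host 64-bit multiply is used directly via
 * gen_muls_i64_i32/gen_mulu_i64_i32 instead of a helper.
 */
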
static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}

/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};

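/*
 * Worked example (illustrative): the decoder tests
 * (neon_3r_sizes[op] & (1 << size)) and UNDEFs if the bit is clear,
 * so VQDMULH/VQRDMULH with mask 0x6 accept only size 1 and 2; the
 * 8-bit and 64-bit encodings UNDEF.
 */
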
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};


/* Expand v8.1 simd helper.  */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}

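/*
 * Illustrative note: opr_sz above is (1 + q) * 8 bytes, i.e. 8 for a
 * D-register (q == 0) operation and 16 for a Q-register (q == 1)
 * operation, passed to tcg_gen_gvec_3_ptr() as both the operand size
 * and the maximum size.
 */
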
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_ssra[] = {
    INDEX_op_sari_vec, INDEX_op_add_vec, 0
};

const GVecGen2i ssra_op[4] = {
    { .fni8 = gen_ssra8_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_8 },
    { .fni8 = gen_ssra16_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_16 },
    { .fni4 = gen_ssra32_i32,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_32 },
    { .fni8 = gen_ssra64_i64,
      .fniv = gen_ssra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_ssra,
      .load_dest = true,
      .vece = MO_64 },
};

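/*
 * Illustrative note on the GVecGen2i table above: the gvec expander
 * may use .fniv with host vector instructions when the opcodes listed
 * in .opt_opc are available, or fall back to .fni8/.fni4, which work
 * on one 64-bit or 32-bit chunk at a time (tcg_gen_vec_sar8i_i64
 * shifts eight byte lanes within a single i64).  .load_dest is set
 * because SSRA accumulates into the destination: d += a >> shift,
 * per element.
 */
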
static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_usra[] = {
    INDEX_op_shri_vec, INDEX_op_add_vec, 0
};

const GVecGen2i usra_op[4] = {
    { .fni8 = gen_usra8_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_8, },
    { .fni8 = gen_usra16_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_16, },
    { .fni4 = gen_usra32_i32,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_32, },
    { .fni8 = gen_usra64_i64,
      .fniv = gen_usra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_64, },
};

static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
        tcg_gen_shri_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };

const GVecGen2i sri_op[4] = {
    { .fni8 = gen_shr8_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_8 },
    { .fni8 = gen_shr16_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_16 },
    { .fni4 = gen_shr32_ins_i32,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_32 },
    { .fni8 = gen_shr64_ins_i64,
      .fniv = gen_shr_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_64 },
};

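/*
 * Worked example for the SRI masking above (illustrative): for MO_8
 * with shift == 3, dup_const(MO_8, 0xff >> 3) is 0x1f1f1f1f1f1f1f1f,
 * so each result byte is (d & 0xe0) | ((a >> 3) & 0x1f): the top
 * 'shift' bits of the destination are preserved and the rest are
 * shifted in from the source.
 */
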
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
        tcg_gen_shli_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };

const GVecGen2i sli_op[4] = {
    { .fni8 = gen_shl8_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_8 },
    { .fni8 = gen_shl16_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_16 },
    { .fni4 = gen_shl32_ins_i32,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_32 },
    { .fni8 = gen_shl64_ins_i64,
      .fniv = gen_shl_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_64 },
};

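/*
 * Worked example for the SLI masking above (illustrative): for MO_8
 * with shift == 3 the mask is 0xf8 in each byte, so each result byte
 * is (d & 0x07) | ((a << 3) & 0xf8); SLI preserves the low 'shift'
 * bits of the destination, the mirror image of SRI.
 */
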
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}

/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */

static const TCGOpcode vecop_list_mla[] = {
    INDEX_op_mul_vec, INDEX_op_add_vec, 0
};

static const TCGOpcode vecop_list_mls[] = {
    INDEX_op_mul_vec, INDEX_op_sub_vec, 0
};

const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_64 },
};

const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_64 },
};

/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };

const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_64 },
};

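/*
 * Illustrative note: all three expansions above produce a full lane
 * mask rather than a boolean: lanes where (a & b) != 0 become
 * all-ones (setcond yields 0 or 1, then neg turns 1 into -1), which
 * matches the architectural result of VTST/CMTST.
 */
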
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqadd[] = {
    INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_64 },
};

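/*
 * Illustrative note on the saturation detection above: x holds the
 * wrapping add and t the saturating add; any lane where they differ
 * must have saturated, so the TCG_COND_NE compare turns exactly those
 * lanes into all-ones, which are then ORed into the QC accumulator
 * ('sat', pointed at vfp.qc by the expansion in disas_neon_data_insn).
 * E.g. for unsigned bytes, 0xff + 0x01 wraps to 0x00 but saturates to
 * 0xff, flagging that lane.
 */
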
static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqadd[] = {
    INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqsub[] = {
    INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqsub[] = {
    INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_64 },
};

/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int vec_size;
    uint32_t imm;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_ptr ptr1, ptr2, ptr3;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        return 1;
    }
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    vec_size = q ? 16 : 8;
    rd_ofs = neon_reg_offset(rd, 0);
    rn_ofs = neon_reg_offset(rn, 0);
    rm_ofs = neon_reg_offset(rm, 0);

    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        switch (op) {
        case NEON_3R_SHA:
            /* The SHA-1/SHA-256 3-register instructions require special
             * treatment here, as their size field is overloaded as an
             * op type selector, and they all consume their input in a
             * single pass.
             */
            if (!q) {
                return 1;
            }
            if (!u) { /* SHA-1 */
                if (!dc_isar_feature(aa32_sha1, s)) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                tmp4 = tcg_const_i32(size);
                gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
                tcg_temp_free_i32(tmp4);
            } else { /* SHA-256 */
                if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                switch (size) {
                case 0:
                    gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
                    break;
                case 1:
                    gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
                    break;
                case 2:
                    gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
                    break;
                }
            }
            tcg_temp_free_ptr(ptr1);
            tcg_temp_free_ptr(ptr2);
            tcg_temp_free_ptr(ptr3);
            return 0;

        case NEON_3R_VPADD_VQRDMLAH:
            if (!u) {
                break;  /* VPADD */
            }
            /* VQRDMLAH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_VFM_VQRDMLSH:
            if (!u) {
                /* VFM, VFMS */
                if (size == 1) {
                    return 1;
                }
                break;
            }
            /* VQRDMLSH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_LOGIC: /* Logic ops.  */
            switch ((u << 2) | size) {
            case 0: /* VAND */
                tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 1: /* VBIC */
                tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
                break;
            case 2: /* VORR */
                tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
                                vec_size, vec_size);
                break;
            case 3: /* VORN */
                tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 4: /* VEOR */
                tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 5: /* VBSL */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
                                    vec_size, vec_size);
                break;
            case 6: /* VBIT */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
                                    vec_size, vec_size);
                break;
            case 7: /* VBIF */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
                                    vec_size, vec_size);
                break;
            }
            return 0;

        case NEON_3R_VADD_VSUB:
            if (u) {
                tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else {
                tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            }
            return 0;

        case NEON_3R_VQADD:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqadd_op : sqadd_op) + size);
            return 0;

        case NEON_3R_VQSUB:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqsub_op : sqsub_op) + size);
            return 0;

        case NEON_3R_VMUL: /* VMUL */
            if (u) {
                /* Polynomial case allows only P8 and is handled below.  */
                if (size != 0) {
                    return 1;
                }
            } else {
                tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                return 0;
            }
            break;

        case NEON_3R_VML: /* VMLA, VMLS */
            tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
                           u ? &mls_op[size] : &mla_op[size]);
            return 0;

        case NEON_3R_VTST_VCEQ:
            if (u) { /* VCEQ */
                tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else { /* VTST */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &cmtst_op[size]);
            }
            return 0;

        case NEON_3R_VCGT:
            tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VCGE:
            tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VMAX:
            if (u) {
                tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        case NEON_3R_VMIN:
            if (u) {
                tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        }

        if (size == 3) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case NEON_3R_VSHL:
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
            }
            break;
        case NEON_3R_VPADD_VQRDMLAH:
        case NEON_3R_VPMAX:
        case NEON_3R_VPMIN:
            pairwise = 1;
            break;
        case NEON_3R_FLOAT_ARITH:
            pairwise = (u && size < 2); /* if VPADD (float) */
            break;
        case NEON_3R_FLOAT_MINMAX:
            pairwise = u; /* if VPMIN/VPMAX (float) */
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u && size) {
                /* no encoding for U=0 C=1x */
                return 1;
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (!u) {
                return 1;
            }
            break;
        case NEON_3R_FLOAT_MISC:
            /* VMAXNM/VMINNM in ARMv8 */
            if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
                return 1;
            }
            break;
        case NEON_3R_VFM_VQRDMLSH:
            if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                return 1;
            }
            break;
        default:
            break;
        }

        if (pairwise && q) {
            /* All the pairwise insns UNDEF if Q is set */
            return 1;
        }

        for (pass = 0; pass < (q ? 4 : 2); pass++) {

            if (pairwise) {
                /* Pairwise.  */
                if (pass < 1) {
                    tmp = neon_load_reg(rn, 0);
                    tmp2 = neon_load_reg(rn, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                }
            } else {
                /* Elementwise.  */
                tmp = neon_load_reg(rn, pass);
                tmp2 = neon_load_reg(rm, pass);
            }
            switch (op) {
            case NEON_3R_VHADD:
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case NEON_3R_VRHADD:
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case NEON_3R_VHSUB:
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case NEON_3R_VSHL:
                GEN_NEON_INTEGER_OP(shl);
                break;
            case NEON_3R_VQSHL:
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case NEON_3R_VRSHL:
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case NEON_3R_VQRSHL:
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case NEON_3R_VABD:
                GEN_NEON_INTEGER_OP(abd);
                break;
            case NEON_3R_VABA:
                GEN_NEON_INTEGER_OP(abd);
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                gen_neon_add(size, tmp, tmp2);
                break;
            case NEON_3R_VMUL:
                /* VMUL.P8; other cases already eliminated.  */
                gen_helper_neon_mul_p8(tmp, tmp, tmp2);
                break;
            case NEON_3R_VPMAX:
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case NEON_3R_VPMIN:
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high.  */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPADD_VQRDMLAH:
                switch (size) {
                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                break;
            case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic.  */
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                switch ((u << 2) | size) {
                case 0: /* VADD */
                case 4: /* VPADD */
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    break;
                case 2: /* VSUB */
                    gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
                    break;
                default:
                    abort();
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MULTIPLY:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                if (!u) {
                    tcg_temp_free_i32(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    if (size == 0) {
                        gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_CMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_ACMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MINMAX:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MISC:
                if (u) {
                    /* VMAXNM/VMINNM */
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    if (size == 0) {
                        gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
                    }
                    tcg_temp_free_ptr(fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                    } else {
                        gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                    }
                }
                break;
            case NEON_3R_VFM_VQRDMLSH:
            {
                /* VFMA, VFMS: fused multiply-add */
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                TCGv_i32 tmp3 = neon_load_reg(rd, pass);
                if (size) {
                    /* VFMS */
                    gen_helper_vfp_negs(tmp, tmp);
                }
                gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
                tcg_temp_free_i32(tmp3);
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            default:
                abort();
            }
            tcg_temp_free_i32(tmp2);
dd8fbd78 5369
9ee6e8bb
PB
5370 /* Save the result. For elementwise operations we can put it
5371 straight into the destination register. For pairwise operations
5372 we have to be careful to avoid clobbering the source operands. */
5373 if (pairwise && rd == rm) {
dd8fbd78 5374 neon_store_scratch(pass, tmp);
9ee6e8bb 5375 } else {
dd8fbd78 5376 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5377 }
5378
5379 } /* for pass */
5380 if (pairwise && rd == rm) {
5381 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5382 tmp = neon_load_scratch(pass);
5383 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5384 }
5385 }
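 /* Illustrative scenario (not in the original source): for a pairwise op
 * such as VPADD d0, d1, d0 we have rd == rm, so writing d0 during pass 0
 * would clobber the d0 half that pass 1 still reads; staging results in
 * the scratch slots and copying them back afterwards avoids this.
 */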
ad69471c 5386 /* End of 3 register same size operations. */
9ee6e8bb
PB
5387 } else if (insn & (1 << 4)) {
5388 if ((insn & 0x00380080) != 0) {
5389 /* Two registers and shift. */
5390 op = (insn >> 8) & 0xf;
5391 if (insn & (1 << 7)) {
cc13115b
PM
5392 /* 64-bit shift. */
5393 if (op > 7) {
5394 return 1;
5395 }
9ee6e8bb
PB
5396 size = 3;
5397 } else {
5398 size = 2;
5399 while ((insn & (1 << (size + 19))) == 0)
5400 size--;
5401 }
5402 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
9ee6e8bb
PB
5403 if (op < 8) {
5404 /* Shift by immediate:
5405 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5406 if (q && ((rd | rm) & 1)) {
5407 return 1;
5408 }
5409 if (!u && (op == 4 || op == 6)) {
5410 return 1;
5411 }
9ee6e8bb
PB
5412 /* Right shifts are encoded as N - shift, where N is the
5413 element size in bits. */
1dc8425e 5414 if (op <= 4) {
9ee6e8bb 5415 shift = shift - (1 << (size + 3));
1dc8425e
RH
5416 }
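 /* Worked example of the encoding (illustrative): with size == 0 the
 * element width N is 8, and an encoded shift field of 7 denotes VSHR #1,
 * because the immediate is stored as N - shift; the subtraction above
 * yields 7 - 8 = -1, and the negation in the VSHR case recovers 1.
 */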
5417
5418 switch (op) {
5419 case 0: /* VSHR */
5420 /* Right shift comes here negative. */
5421 shift = -shift;
5422 /* Shifts larger than the element size are architecturally
5423 * valid. Unsigned results in all zeros; signed results
5424 * in all sign bits.
5425 */
5426 if (!u) {
5427 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
5428 MIN(shift, (8 << size) - 1),
5429 vec_size, vec_size);
5430 } else if (shift >= 8 << size) {
5431 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5432 } else {
5433 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
5434 vec_size, vec_size);
5435 }
5436 return 0;
5437
41f6c113
RH
5438 case 1: /* VSRA */
5439 /* Right shift comes here negative. */
5440 shift = -shift;
5441 /* Shifts larger than the element size are architecturally
5442 * valid. Unsigned results in all zeros; signed results
5443 * in all sign bits.
5444 */
5445 if (!u) {
5446 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5447 MIN(shift, (8 << size) - 1),
5448 &ssra_op[size]);
5449 } else if (shift >= 8 << size) {
5450 /* rd += 0 */
5451 } else {
5452 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5453 shift, &usra_op[size]);
5454 }
5455 return 0;
5456
f3cd8218
RH
5457 case 4: /* VSRI */
5458 if (!u) {
5459 return 1;
5460 }
5461 /* Right shift comes here negative. */
5462 shift = -shift;
5463 /* Shift out of range leaves destination unchanged. */
5464 if (shift < 8 << size) {
5465 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5466 shift, &sri_op[size]);
5467 }
5468 return 0;
5469
1dc8425e 5470 case 5: /* VSHL, VSLI */
f3cd8218
RH
5471 if (u) { /* VSLI */
5472 /* Shift out of range leaves destination unchanged. */
5473 if (shift < 8 << size) {
5474 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
5475 vec_size, shift, &sli_op[size]);
5476 }
5477 } else { /* VSHL */
1dc8425e
RH
5478 /* Shifts larger than the element size are
5479 * architecturally valid and results in zero.
5480 */
5481 if (shift >= 8 << size) {
5482 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5483 } else {
5484 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
5485 vec_size, vec_size);
5486 }
1dc8425e 5487 }
f3cd8218 5488 return 0;
1dc8425e
RH
5489 }
5490
9ee6e8bb
PB
5491 if (size == 3) {
5492 count = q + 1;
5493 } else {
5494 count = q ? 4: 2;
5495 }
1dc8425e
RH
5496
5497 /* To avoid excessive duplication of ops we implement shift
5498 * by immediate using the variable shift operations.
5499 */
5500 imm = dup_const(size, shift);
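 /* A brief illustration: dup_const() replicates the (possibly negative)
 * per-element shift count across a 64-bit constant, e.g. for size == 1
 * (MO_16) and shift == -4 it produces 0xfffcfffcfffcfffc, so every
 * 16-bit lane of the variable-shift helper sees the same count.
 */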
9ee6e8bb
PB
5501
5502 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5503 if (size == 3) {
5504 neon_load_reg64(cpu_V0, rm + pass);
5505 tcg_gen_movi_i64(cpu_V1, imm);
5506 switch (op) {
ad69471c
PB
5507 case 2: /* VRSHR */
5508 case 3: /* VRSRA */
5509 if (u)
5510 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5511 else
ad69471c 5512 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5513 break;
0322b26e 5514 case 6: /* VQSHLU */
02da0b2d
PM
5515 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5516 cpu_V0, cpu_V1);
ad69471c 5517 break;
0322b26e
PM
5518 case 7: /* VQSHL */
5519 if (u) {
02da0b2d 5520 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5521 cpu_V0, cpu_V1);
5522 } else {
02da0b2d 5523 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5524 cpu_V0, cpu_V1);
5525 }
9ee6e8bb 5526 break;
1dc8425e
RH
5527 default:
5528 g_assert_not_reached();
9ee6e8bb 5529 }
41f6c113 5530 if (op == 3) {
ad69471c 5531 /* Accumulate. */
5371cb81 5532 neon_load_reg64(cpu_V1, rd + pass);
ad69471c 5533 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5534 }
5535 neon_store_reg64(cpu_V0, rd + pass);
5536 } else { /* size < 3 */
 5537 /* Operands in tmp and tmp2. */
dd8fbd78 5538 tmp = neon_load_reg(rm, pass);
7d1b0095 5539 tmp2 = tcg_temp_new_i32();
dd8fbd78 5540 tcg_gen_movi_i32(tmp2, imm);
ad69471c 5541 switch (op) {
ad69471c
PB
5542 case 2: /* VRSHR */
5543 case 3: /* VRSRA */
5544 GEN_NEON_INTEGER_OP(rshl);
5545 break;
0322b26e 5546 case 6: /* VQSHLU */
ad69471c 5547 switch (size) {
0322b26e 5548 case 0:
02da0b2d
PM
5549 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5550 tmp, tmp2);
0322b26e
PM
5551 break;
5552 case 1:
02da0b2d
PM
5553 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5554 tmp, tmp2);
0322b26e
PM
5555 break;
5556 case 2:
02da0b2d
PM
5557 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5558 tmp, tmp2);
0322b26e
PM
5559 break;
5560 default:
cc13115b 5561 abort();
ad69471c
PB
5562 }
5563 break;
0322b26e 5564 case 7: /* VQSHL */
02da0b2d 5565 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5566 break;
1dc8425e
RH
5567 default:
5568 g_assert_not_reached();
ad69471c 5569 }
7d1b0095 5570 tcg_temp_free_i32(tmp2);
ad69471c 5571
41f6c113 5572 if (op == 3) {
ad69471c 5573 /* Accumulate. */
dd8fbd78 5574 tmp2 = neon_load_reg(rd, pass);
5371cb81 5575 gen_neon_add(size, tmp, tmp2);
7d1b0095 5576 tcg_temp_free_i32(tmp2);
ad69471c 5577 }
dd8fbd78 5578 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5579 }
5580 } /* for pass */
5581 } else if (op < 10) {
ad69471c 5582 /* Shift by immediate and narrow:
9ee6e8bb 5583 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5584 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5585 if (rm & 1) {
5586 return 1;
5587 }
9ee6e8bb
PB
5588 shift = shift - (1 << (size + 3));
5589 size++;
92cdfaeb 5590 if (size == 3) {
a7812ae4 5591 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5592 neon_load_reg64(cpu_V0, rm);
5593 neon_load_reg64(cpu_V1, rm + 1);
5594 for (pass = 0; pass < 2; pass++) {
5595 TCGv_i64 in;
5596 if (pass == 0) {
5597 in = cpu_V0;
5598 } else {
5599 in = cpu_V1;
5600 }
ad69471c 5601 if (q) {
0b36f4cd 5602 if (input_unsigned) {
92cdfaeb 5603 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5604 } else {
92cdfaeb 5605 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5606 }
ad69471c 5607 } else {
0b36f4cd 5608 if (input_unsigned) {
92cdfaeb 5609 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5610 } else {
92cdfaeb 5611 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5612 }
ad69471c 5613 }
7d1b0095 5614 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5615 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5616 neon_store_reg(rd, pass, tmp);
5617 } /* for pass */
5618 tcg_temp_free_i64(tmp64);
5619 } else {
5620 if (size == 1) {
5621 imm = (uint16_t)shift;
5622 imm |= imm << 16;
2c0262af 5623 } else {
92cdfaeb
PM
5624 /* size == 2 */
5625 imm = (uint32_t)shift;
5626 }
5627 tmp2 = tcg_const_i32(imm);
5628 tmp4 = neon_load_reg(rm + 1, 0);
5629 tmp5 = neon_load_reg(rm + 1, 1);
5630 for (pass = 0; pass < 2; pass++) {
5631 if (pass == 0) {
5632 tmp = neon_load_reg(rm, 0);
5633 } else {
5634 tmp = tmp4;
5635 }
0b36f4cd
CL
5636 gen_neon_shift_narrow(size, tmp, tmp2, q,
5637 input_unsigned);
92cdfaeb
PM
5638 if (pass == 0) {
5639 tmp3 = neon_load_reg(rm, 1);
5640 } else {
5641 tmp3 = tmp5;
5642 }
0b36f4cd
CL
5643 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5644 input_unsigned);
36aa55dc 5645 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5646 tcg_temp_free_i32(tmp);
5647 tcg_temp_free_i32(tmp3);
5648 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5649 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5650 neon_store_reg(rd, pass, tmp);
5651 } /* for pass */
c6067f04 5652 tcg_temp_free_i32(tmp2);
b75263d6 5653 }
9ee6e8bb 5654 } else if (op == 10) {
cc13115b
PM
5655 /* VSHLL, VMOVL */
5656 if (q || (rd & 1)) {
9ee6e8bb 5657 return 1;
cc13115b 5658 }
ad69471c
PB
5659 tmp = neon_load_reg(rm, 0);
5660 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5661 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5662 if (pass == 1)
5663 tmp = tmp2;
5664
5665 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5666
9ee6e8bb
PB
5667 if (shift != 0) {
5668 /* The shift is less than the width of the source
ad69471c
PB
5669 type, so we can just shift the whole register. */
5670 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5671 /* Widen the result of shift: we need to clear
5672 * the potential overflow bits resulting from
5673 * left bits of the narrow input appearing as
 5674 * right bits of the left neighbour narrow
5675 * input. */
ad69471c
PB
5676 if (size < 2 || !u) {
5677 uint64_t imm64;
5678 if (size == 0) {
5679 imm = (0xffu >> (8 - shift));
5680 imm |= imm << 16;
acdf01ef 5681 } else if (size == 1) {
ad69471c 5682 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5683 } else {
5684 /* size == 2 */
5685 imm = 0xffffffff >> (32 - shift);
5686 }
5687 if (size < 2) {
5688 imm64 = imm | (((uint64_t)imm) << 32);
5689 } else {
5690 imm64 = imm;
9ee6e8bb 5691 }
acdf01ef 5692 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5693 }
5694 }
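 /* Worked example (illustrative): for size == 0, a signed input and
 * shift == 4, each byte was sign-extended to 16 bits, so the 64-bit
 * shift pushes the top bits of one lane into the bottom of its left
 * neighbour; imm = 0xff >> (8 - 4) = 0x0f gives imm64 =
 * 0x000f000f000f000f, and ANDing with ~imm64 clears those stray bits.
 */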
ad69471c 5695 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5696 }
f73534a5 5697 } else if (op >= 14) {
9ee6e8bb 5698 /* VCVT fixed-point. */
c253dd78
PM
5699 TCGv_ptr fpst;
5700 TCGv_i32 shiftv;
5701 VFPGenFixPointFn *fn;
5702
cc13115b
PM
5703 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5704 return 1;
5705 }
c253dd78
PM
5706
5707 if (!(op & 1)) {
5708 if (u) {
5709 fn = gen_helper_vfp_ultos;
5710 } else {
5711 fn = gen_helper_vfp_sltos;
5712 }
5713 } else {
5714 if (u) {
5715 fn = gen_helper_vfp_touls_round_to_zero;
5716 } else {
5717 fn = gen_helper_vfp_tosls_round_to_zero;
5718 }
5719 }
5720
f73534a5
PM
5721 /* We have already masked out the must-be-1 top bit of imm6,
5722 * hence this 32-shift where the ARM ARM has 64-imm6.
5723 */
5724 shift = 32 - shift;
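 /* Example (illustrative): an imm6 field of 0b101000 (40) encodes
 * 64 - 40 = 24 fraction bits; with the must-be-1 top bit masked off,
 * shift arrives here as 8, and 32 - 8 recovers the same 24.
 */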
c253dd78
PM
5725 fpst = get_fpstatus_ptr(1);
5726 shiftv = tcg_const_i32(shift);
9ee6e8bb 5727 for (pass = 0; pass < (q ? 4 : 2); pass++) {
c253dd78
PM
5728 TCGv_i32 tmpf = neon_load_reg(rm, pass);
5729 fn(tmpf, tmpf, shiftv, fpst);
5730 neon_store_reg(rd, pass, tmpf);
2c0262af 5731 }
c253dd78
PM
5732 tcg_temp_free_ptr(fpst);
5733 tcg_temp_free_i32(shiftv);
2c0262af 5734 } else {
9ee6e8bb
PB
5735 return 1;
5736 }
5737 } else { /* (insn & 0x00380080) == 0 */
246fa4ac
RH
5738 int invert, reg_ofs, vec_size;
5739
7d80fee5
PM
5740 if (q && (rd & 1)) {
5741 return 1;
5742 }
9ee6e8bb
PB
5743
5744 op = (insn >> 8) & 0xf;
5745 /* One register and immediate. */
5746 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5747 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5748 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5749 * We choose to not special-case this and will behave as if a
5750 * valid constant encoding of 0 had been given.
5751 */
9ee6e8bb
PB
5752 switch (op) {
5753 case 0: case 1:
5754 /* no-op */
5755 break;
5756 case 2: case 3:
5757 imm <<= 8;
5758 break;
5759 case 4: case 5:
5760 imm <<= 16;
5761 break;
5762 case 6: case 7:
5763 imm <<= 24;
5764 break;
5765 case 8: case 9:
5766 imm |= imm << 16;
5767 break;
5768 case 10: case 11:
5769 imm = (imm << 8) | (imm << 24);
5770 break;
5771 case 12:
8e31209e 5772 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5773 break;
5774 case 13:
5775 imm = (imm << 16) | 0xffff;
5776 break;
5777 case 14:
5778 imm |= (imm << 8) | (imm << 16) | (imm << 24);
246fa4ac 5779 if (invert) {
9ee6e8bb 5780 imm = ~imm;
246fa4ac 5781 }
9ee6e8bb
PB
5782 break;
5783 case 15:
7d80fee5
PM
5784 if (invert) {
5785 return 1;
5786 }
9ee6e8bb
PB
5787 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5788 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5789 break;
5790 }
246fa4ac 5791 if (invert) {
9ee6e8bb 5792 imm = ~imm;
246fa4ac 5793 }
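 /* Illustrative decode of one cmode: op == 12 with imm8 == 0xab expands
 * to (0xab << 8) | 0xff = 0x0000abff (the "00VVFF" constant form),
 * which is then broadcast to every 32-bit lane below; op == 14 instead
 * replicates imm8 into each byte.
 */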
9ee6e8bb 5794
246fa4ac
RH
5795 reg_ofs = neon_reg_offset(rd, 0);
5796 vec_size = q ? 16 : 8;
5797
5798 if (op & 1 && op < 12) {
5799 if (invert) {
5800 /* The immediate value has already been inverted,
5801 * so BIC becomes AND.
5802 */
5803 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
5804 vec_size, vec_size);
9ee6e8bb 5805 } else {
246fa4ac
RH
5806 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
5807 vec_size, vec_size);
5808 }
5809 } else {
5810 /* VMOV, VMVN. */
5811 if (op == 14 && invert) {
5812 TCGv_i64 t64 = tcg_temp_new_i64();
5813
5814 for (pass = 0; pass <= q; ++pass) {
5815 uint64_t val = 0;
a5a14945 5816 int n;
246fa4ac
RH
5817
5818 for (n = 0; n < 8; n++) {
5819 if (imm & (1 << (n + pass * 8))) {
5820 val |= 0xffull << (n * 8);
5821 }
9ee6e8bb 5822 }
246fa4ac
RH
5823 tcg_gen_movi_i64(t64, val);
5824 neon_store_reg64(t64, rd + pass);
9ee6e8bb 5825 }
246fa4ac
RH
5826 tcg_temp_free_i64(t64);
5827 } else {
5828 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
9ee6e8bb
PB
5829 }
5830 }
5831 }
e4b3861d 5832 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5833 if (size != 3) {
5834 op = (insn >> 8) & 0xf;
5835 if ((insn & (1 << 6)) == 0) {
5836 /* Three registers of different lengths. */
5837 int src1_wide;
5838 int src2_wide;
5839 int prewiden;
526d0096
PM
5840 /* undefreq: bit 0 : UNDEF if size == 0
5841 * bit 1 : UNDEF if size == 1
5842 * bit 2 : UNDEF if size == 2
5843 * bit 3 : UNDEF if U == 1
5844 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
5845 */
5846 int undefreq;
5847 /* prewiden, src1_wide, src2_wide, undefreq */
5848 static const int neon_3reg_wide[16][4] = {
5849 {1, 0, 0, 0}, /* VADDL */
5850 {1, 1, 0, 0}, /* VADDW */
5851 {1, 0, 0, 0}, /* VSUBL */
5852 {1, 1, 0, 0}, /* VSUBW */
5853 {0, 1, 1, 0}, /* VADDHN */
5854 {0, 0, 0, 0}, /* VABAL */
5855 {0, 1, 1, 0}, /* VSUBHN */
5856 {0, 0, 0, 0}, /* VABDL */
5857 {0, 0, 0, 0}, /* VMLAL */
526d0096 5858 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 5859 {0, 0, 0, 0}, /* VMLSL */
526d0096 5860 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 5861 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 5862 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 5863 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 5864 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5865 };
5866
5867 prewiden = neon_3reg_wide[op][0];
5868 src1_wide = neon_3reg_wide[op][1];
5869 src2_wide = neon_3reg_wide[op][2];
695272dc 5870 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5871
526d0096
PM
5872 if ((undefreq & (1 << size)) ||
5873 ((undefreq & 8) && u)) {
695272dc
PM
5874 return 1;
5875 }
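 /* Example reading of the table (illustrative): VQDMLAL's undefreq of 9
 * sets bits 0 and 3, so the check above rejects it for size == 0 and
 * for U == 1, exactly as the comment on the bit meanings describes.
 */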
5876 if ((src1_wide && (rn & 1)) ||
5877 (src2_wide && (rm & 1)) ||
5878 (!src2_wide && (rd & 1))) {
ad69471c 5879 return 1;
695272dc 5880 }
ad69471c 5881
4e624eda
PM
5882 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
5883 * outside the loop below as it only performs a single pass.
5884 */
5885 if (op == 14 && size == 2) {
5886 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
5887
962fcbf2 5888 if (!dc_isar_feature(aa32_pmull, s)) {
4e624eda
PM
5889 return 1;
5890 }
5891 tcg_rn = tcg_temp_new_i64();
5892 tcg_rm = tcg_temp_new_i64();
5893 tcg_rd = tcg_temp_new_i64();
5894 neon_load_reg64(tcg_rn, rn);
5895 neon_load_reg64(tcg_rm, rm);
5896 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
5897 neon_store_reg64(tcg_rd, rd);
5898 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
5899 neon_store_reg64(tcg_rd, rd + 1);
5900 tcg_temp_free_i64(tcg_rn);
5901 tcg_temp_free_i64(tcg_rm);
5902 tcg_temp_free_i64(tcg_rd);
5903 return 0;
5904 }
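 /* For reference, a tiny carry-less multiply (illustrative): in GF(2)
 * polynomial arithmetic 0b11 * 0b11 = 0b101, because partial products
 * are XORed rather than added with carries; the 64x64 form here yields
 * a 128-bit product split across rd and rd + 1.
 */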
5905
9ee6e8bb
PB
5906 /* Avoid overlapping operands. Wide source operands are
5907 always aligned so will never overlap with wide
5908 destinations in problematic ways. */
8f8e3aa4 5909 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5910 tmp = neon_load_reg(rm, 1);
5911 neon_store_scratch(2, tmp);
8f8e3aa4 5912 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5913 tmp = neon_load_reg(rn, 1);
5914 neon_store_scratch(2, tmp);
9ee6e8bb 5915 }
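 /* Concrete case (illustrative): VMULL q0, d1, d0 has rd == rm, and the
 * 64-bit store of pass 0 to d0 would clobber the top half of the narrow
 * d0 operand still needed in pass 1, hence the stash in scratch slot 2.
 */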
f764718d 5916 tmp3 = NULL;
9ee6e8bb 5917 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5918 if (src1_wide) {
5919 neon_load_reg64(cpu_V0, rn + pass);
f764718d 5920 tmp = NULL;
9ee6e8bb 5921 } else {
ad69471c 5922 if (pass == 1 && rd == rn) {
dd8fbd78 5923 tmp = neon_load_scratch(2);
9ee6e8bb 5924 } else {
ad69471c
PB
5925 tmp = neon_load_reg(rn, pass);
5926 }
5927 if (prewiden) {
5928 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5929 }
5930 }
ad69471c
PB
5931 if (src2_wide) {
5932 neon_load_reg64(cpu_V1, rm + pass);
f764718d 5933 tmp2 = NULL;
9ee6e8bb 5934 } else {
ad69471c 5935 if (pass == 1 && rd == rm) {
dd8fbd78 5936 tmp2 = neon_load_scratch(2);
9ee6e8bb 5937 } else {
ad69471c
PB
5938 tmp2 = neon_load_reg(rm, pass);
5939 }
5940 if (prewiden) {
5941 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5942 }
9ee6e8bb
PB
5943 }
5944 switch (op) {
5945 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5946 gen_neon_addl(size);
9ee6e8bb 5947 break;
79b0e534 5948 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5949 gen_neon_subl(size);
9ee6e8bb
PB
5950 break;
5951 case 5: case 7: /* VABAL, VABDL */
5952 switch ((size << 1) | u) {
ad69471c
PB
5953 case 0:
5954 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5955 break;
5956 case 1:
5957 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5958 break;
5959 case 2:
5960 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5961 break;
5962 case 3:
5963 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5964 break;
5965 case 4:
5966 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5967 break;
5968 case 5:
5969 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5970 break;
9ee6e8bb
PB
5971 default: abort();
5972 }
7d1b0095
PM
5973 tcg_temp_free_i32(tmp2);
5974 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5975 break;
5976 case 8: case 9: case 10: case 11: case 12: case 13:
5977 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5978 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5979 break;
5980 case 14: /* Polynomial VMULL */
e5ca24cb 5981 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5982 tcg_temp_free_i32(tmp2);
5983 tcg_temp_free_i32(tmp);
e5ca24cb 5984 break;
695272dc
PM
5985 default: /* 15 is RESERVED: caught earlier */
5986 abort();
9ee6e8bb 5987 }
ebcd88ce
PM
5988 if (op == 13) {
5989 /* VQDMULL */
5990 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5991 neon_store_reg64(cpu_V0, rd + pass);
5992 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5993 /* Accumulate. */
ebcd88ce 5994 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5995 switch (op) {
4dc064e6
PM
5996 case 10: /* VMLSL */
5997 gen_neon_negl(cpu_V0, size);
5998 /* Fall through */
5999 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6000 gen_neon_addl(size);
9ee6e8bb
PB
6001 break;
6002 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6003 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6004 if (op == 11) {
6005 gen_neon_negl(cpu_V0, size);
6006 }
ad69471c
PB
6007 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6008 break;
9ee6e8bb
PB
6009 default:
6010 abort();
6011 }
ad69471c 6012 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6013 } else if (op == 4 || op == 6) {
6014 /* Narrowing operation. */
7d1b0095 6015 tmp = tcg_temp_new_i32();
79b0e534 6016 if (!u) {
9ee6e8bb 6017 switch (size) {
ad69471c
PB
6018 case 0:
6019 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6020 break;
6021 case 1:
6022 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6023 break;
6024 case 2:
6025 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6026 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6027 break;
9ee6e8bb
PB
6028 default: abort();
6029 }
6030 } else {
6031 switch (size) {
ad69471c
PB
6032 case 0:
6033 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6034 break;
6035 case 1:
6036 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6037 break;
6038 case 2:
6039 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6040 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6041 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6042 break;
9ee6e8bb
PB
6043 default: abort();
6044 }
6045 }
ad69471c
PB
6046 if (pass == 0) {
6047 tmp3 = tmp;
6048 } else {
6049 neon_store_reg(rd, 0, tmp3);
6050 neon_store_reg(rd, 1, tmp);
6051 }
9ee6e8bb
PB
6052 } else {
6053 /* Write back the result. */
ad69471c 6054 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6055 }
6056 }
6057 } else {
3e3326df
PM
6058 /* Two registers and a scalar. NB that for ops of this form
6059 * the ARM ARM labels bit 24 as Q, but it is in our variable
6060 * 'u', not 'q'.
6061 */
6062 if (size == 0) {
6063 return 1;
6064 }
9ee6e8bb 6065 switch (op) {
9ee6e8bb 6066 case 1: /* Float VMLA scalar */
9ee6e8bb 6067 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6068 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6069 if (size == 1) {
6070 return 1;
6071 }
6072 /* fall through */
6073 case 0: /* Integer VMLA scalar */
6074 case 4: /* Integer VMLS scalar */
6075 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6076 case 12: /* VQDMULH scalar */
6077 case 13: /* VQRDMULH scalar */
3e3326df
PM
6078 if (u && ((rd | rn) & 1)) {
6079 return 1;
6080 }
dd8fbd78
FN
6081 tmp = neon_get_scalar(size, rm);
6082 neon_store_scratch(0, tmp);
9ee6e8bb 6083 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6084 tmp = neon_load_scratch(0);
6085 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6086 if (op == 12) {
6087 if (size == 1) {
02da0b2d 6088 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6089 } else {
02da0b2d 6090 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6091 }
6092 } else if (op == 13) {
6093 if (size == 1) {
02da0b2d 6094 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6095 } else {
02da0b2d 6096 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6097 }
6098 } else if (op & 1) {
aa47cfdd
PM
6099 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6100 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6101 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6102 } else {
6103 switch (size) {
dd8fbd78
FN
6104 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6105 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6106 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6107 default: abort();
9ee6e8bb
PB
6108 }
6109 }
7d1b0095 6110 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6111 if (op < 8) {
6112 /* Accumulate. */
dd8fbd78 6113 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6114 switch (op) {
6115 case 0:
dd8fbd78 6116 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6117 break;
6118 case 1:
aa47cfdd
PM
6119 {
6120 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6121 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6122 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6123 break;
aa47cfdd 6124 }
9ee6e8bb 6125 case 4:
dd8fbd78 6126 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6127 break;
6128 case 5:
aa47cfdd
PM
6129 {
6130 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6131 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6132 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6133 break;
aa47cfdd 6134 }
9ee6e8bb
PB
6135 default:
6136 abort();
6137 }
7d1b0095 6138 tcg_temp_free_i32(tmp2);
9ee6e8bb 6139 }
dd8fbd78 6140 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6141 }
6142 break;
9ee6e8bb 6143 case 3: /* VQDMLAL scalar */
9ee6e8bb 6144 case 7: /* VQDMLSL scalar */
9ee6e8bb 6145 case 11: /* VQDMULL scalar */
3e3326df 6146 if (u == 1) {
ad69471c 6147 return 1;
3e3326df
PM
6148 }
6149 /* fall through */
 6150 case 2: /* VMLAL scalar */
6151 case 6: /* VMLSL scalar */
6152 case 10: /* VMULL scalar */
6153 if (rd & 1) {
6154 return 1;
6155 }
dd8fbd78 6156 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6157 /* We need a copy of tmp2 because gen_neon_mull
 6158 * frees it during pass 0. */
7d1b0095 6159 tmp4 = tcg_temp_new_i32();
c6067f04 6160 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6161 tmp3 = neon_load_reg(rn, 1);
ad69471c 6162
9ee6e8bb 6163 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6164 if (pass == 0) {
6165 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6166 } else {
dd8fbd78 6167 tmp = tmp3;
c6067f04 6168 tmp2 = tmp4;
9ee6e8bb 6169 }
ad69471c 6170 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6171 if (op != 11) {
6172 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6173 }
9ee6e8bb 6174 switch (op) {
4dc064e6
PM
6175 case 6:
6176 gen_neon_negl(cpu_V0, size);
6177 /* Fall through */
6178 case 2:
ad69471c 6179 gen_neon_addl(size);
9ee6e8bb
PB
6180 break;
6181 case 3: case 7:
ad69471c 6182 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6183 if (op == 7) {
6184 gen_neon_negl(cpu_V0, size);
6185 }
ad69471c 6186 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6187 break;
6188 case 10:
6189 /* no-op */
6190 break;
6191 case 11:
ad69471c 6192 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6193 break;
6194 default:
6195 abort();
6196 }
ad69471c 6197 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6198 }
61adacc8
RH
6199 break;
6200 case 14: /* VQRDMLAH scalar */
6201 case 15: /* VQRDMLSH scalar */
6202 {
6203 NeonGenThreeOpEnvFn *fn;
dd8fbd78 6204
962fcbf2 6205 if (!dc_isar_feature(aa32_rdm, s)) {
61adacc8
RH
6206 return 1;
6207 }
6208 if (u && ((rd | rn) & 1)) {
6209 return 1;
6210 }
6211 if (op == 14) {
6212 if (size == 1) {
6213 fn = gen_helper_neon_qrdmlah_s16;
6214 } else {
6215 fn = gen_helper_neon_qrdmlah_s32;
6216 }
6217 } else {
6218 if (size == 1) {
6219 fn = gen_helper_neon_qrdmlsh_s16;
6220 } else {
6221 fn = gen_helper_neon_qrdmlsh_s32;
6222 }
6223 }
dd8fbd78 6224
61adacc8
RH
6225 tmp2 = neon_get_scalar(size, rm);
6226 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6227 tmp = neon_load_reg(rn, pass);
6228 tmp3 = neon_load_reg(rd, pass);
6229 fn(tmp, cpu_env, tmp, tmp2, tmp3);
6230 tcg_temp_free_i32(tmp3);
6231 neon_store_reg(rd, pass, tmp);
6232 }
6233 tcg_temp_free_i32(tmp2);
6234 }
9ee6e8bb 6235 break;
61adacc8
RH
6236 default:
6237 g_assert_not_reached();
9ee6e8bb
PB
6238 }
6239 }
6240 } else { /* size == 3 */
6241 if (!u) {
6242 /* Extract. */
9ee6e8bb 6243 imm = (insn >> 8) & 0xf;
ad69471c
PB
6244
6245 if (imm > 7 && !q)
6246 return 1;
6247
52579ea1
PM
6248 if (q && ((rd | rn | rm) & 1)) {
6249 return 1;
6250 }
6251
ad69471c
PB
6252 if (imm == 0) {
6253 neon_load_reg64(cpu_V0, rn);
6254 if (q) {
6255 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6256 }
ad69471c
PB
6257 } else if (imm == 8) {
6258 neon_load_reg64(cpu_V0, rn + 1);
6259 if (q) {
6260 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6261 }
ad69471c 6262 } else if (q) {
a7812ae4 6263 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6264 if (imm < 8) {
6265 neon_load_reg64(cpu_V0, rn);
a7812ae4 6266 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6267 } else {
6268 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6269 neon_load_reg64(tmp64, rm);
ad69471c
PB
6270 }
6271 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6272 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6273 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6274 if (imm < 8) {
6275 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6276 } else {
ad69471c
PB
6277 neon_load_reg64(cpu_V1, rm + 1);
6278 imm -= 8;
9ee6e8bb 6279 }
ad69471c 6280 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6281 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6282 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6283 tcg_temp_free_i64(tmp64);
ad69471c 6284 } else {
a7812ae4 6285 /* BUGFIX */
ad69471c 6286 neon_load_reg64(cpu_V0, rn);
a7812ae4 6287 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6288 neon_load_reg64(cpu_V1, rm);
a7812ae4 6289 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6290 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6291 }
6292 neon_store_reg64(cpu_V0, rd);
6293 if (q) {
6294 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6295 }
6296 } else if ((insn & (1 << 11)) == 0) {
6297 /* Two register misc. */
6298 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6299 size = (insn >> 18) & 3;
600b828c
PM
6300 /* UNDEF for unknown op values and bad op-size combinations */
6301 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6302 return 1;
6303 }
fe8fcf3d
PM
6304 if (neon_2rm_is_v8_op(op) &&
6305 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6306 return 1;
6307 }
fc2a9b37
PM
6308 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6309 q && ((rm | rd) & 1)) {
6310 return 1;
6311 }
9ee6e8bb 6312 switch (op) {
600b828c 6313 case NEON_2RM_VREV64:
9ee6e8bb 6314 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6315 tmp = neon_load_reg(rm, pass * 2);
6316 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6317 switch (size) {
dd8fbd78
FN
6318 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6319 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6320 case 2: /* no-op */ break;
6321 default: abort();
6322 }
dd8fbd78 6323 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6324 if (size == 2) {
dd8fbd78 6325 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6326 } else {
9ee6e8bb 6327 switch (size) {
dd8fbd78
FN
6328 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6329 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6330 default: abort();
6331 }
dd8fbd78 6332 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6333 }
6334 }
6335 break;
600b828c
PM
6336 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6337 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6338 for (pass = 0; pass < q + 1; pass++) {
6339 tmp = neon_load_reg(rm, pass * 2);
6340 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6341 tmp = neon_load_reg(rm, pass * 2 + 1);
6342 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6343 switch (size) {
6344 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6345 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6346 case 2: tcg_gen_add_i64(CPU_V001); break;
6347 default: abort();
6348 }
600b828c 6349 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6350 /* Accumulate. */
ad69471c
PB
6351 neon_load_reg64(cpu_V1, rd + pass);
6352 gen_neon_addl(size);
9ee6e8bb 6353 }
ad69471c 6354 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6355 }
6356 break;
600b828c 6357 case NEON_2RM_VTRN:
9ee6e8bb 6358 if (size == 2) {
a5a14945 6359 int n;
9ee6e8bb 6360 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6361 tmp = neon_load_reg(rm, n);
6362 tmp2 = neon_load_reg(rd, n + 1);
6363 neon_store_reg(rm, n, tmp2);
6364 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6365 }
6366 } else {
6367 goto elementwise;
6368 }
6369 break;
600b828c 6370 case NEON_2RM_VUZP:
02acedf9 6371 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6372 return 1;
9ee6e8bb
PB
6373 }
6374 break;
600b828c 6375 case NEON_2RM_VZIP:
d68a6f3a 6376 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6377 return 1;
9ee6e8bb
PB
6378 }
6379 break;
600b828c
PM
6380 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6381 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6382 if (rm & 1) {
6383 return 1;
6384 }
f764718d 6385 tmp2 = NULL;
9ee6e8bb 6386 for (pass = 0; pass < 2; pass++) {
ad69471c 6387 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6388 tmp = tcg_temp_new_i32();
600b828c
PM
6389 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6390 tmp, cpu_V0);
ad69471c
PB
6391 if (pass == 0) {
6392 tmp2 = tmp;
6393 } else {
6394 neon_store_reg(rd, 0, tmp2);
6395 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6396 }
9ee6e8bb
PB
6397 }
6398 break;
600b828c 6399 case NEON_2RM_VSHLL:
fc2a9b37 6400 if (q || (rd & 1)) {
9ee6e8bb 6401 return 1;
600b828c 6402 }
ad69471c
PB
6403 tmp = neon_load_reg(rm, 0);
6404 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6405 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6406 if (pass == 1)
6407 tmp = tmp2;
6408 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6409 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6410 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6411 }
6412 break;
600b828c 6413 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
6414 {
6415 TCGv_ptr fpst;
6416 TCGv_i32 ahp;
6417
602f6e42 6418 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
6419 q || (rm & 1)) {
6420 return 1;
6421 }
486624fc
AB
6422 fpst = get_fpstatus_ptr(true);
6423 ahp = get_ahp_flag();
58f2682e
PM
6424 tmp = neon_load_reg(rm, 0);
6425 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6426 tmp2 = neon_load_reg(rm, 1);
6427 gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
60011498
PB
6428 tcg_gen_shli_i32(tmp2, tmp2, 16);
6429 tcg_gen_or_i32(tmp2, tmp2, tmp);
58f2682e
PM
6430 tcg_temp_free_i32(tmp);
6431 tmp = neon_load_reg(rm, 2);
6432 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6433 tmp3 = neon_load_reg(rm, 3);
60011498 6434 neon_store_reg(rd, 0, tmp2);
58f2682e
PM
6435 gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
6436 tcg_gen_shli_i32(tmp3, tmp3, 16);
6437 tcg_gen_or_i32(tmp3, tmp3, tmp);
6438 neon_store_reg(rd, 1, tmp3);
7d1b0095 6439 tcg_temp_free_i32(tmp);
486624fc
AB
6440 tcg_temp_free_i32(ahp);
6441 tcg_temp_free_ptr(fpst);
60011498 6442 break;
486624fc 6443 }
600b828c 6444 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
6445 {
6446 TCGv_ptr fpst;
6447 TCGv_i32 ahp;
602f6e42 6448 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
6449 q || (rd & 1)) {
6450 return 1;
6451 }
486624fc
AB
6452 fpst = get_fpstatus_ptr(true);
6453 ahp = get_ahp_flag();
7d1b0095 6454 tmp3 = tcg_temp_new_i32();
60011498
PB
6455 tmp = neon_load_reg(rm, 0);
6456 tmp2 = neon_load_reg(rm, 1);
6457 tcg_gen_ext16u_i32(tmp3, tmp);
b66f6b99
PM
6458 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6459 neon_store_reg(rd, 0, tmp3);
6460 tcg_gen_shri_i32(tmp, tmp, 16);
6461 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
6462 neon_store_reg(rd, 1, tmp);
6463 tmp3 = tcg_temp_new_i32();
60011498 6464 tcg_gen_ext16u_i32(tmp3, tmp2);
b66f6b99
PM
6465 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6466 neon_store_reg(rd, 2, tmp3);
6467 tcg_gen_shri_i32(tmp2, tmp2, 16);
6468 gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
6469 neon_store_reg(rd, 3, tmp2);
486624fc
AB
6470 tcg_temp_free_i32(ahp);
6471 tcg_temp_free_ptr(fpst);
60011498 6472 break;
486624fc 6473 }
9d935509 6474 case NEON_2RM_AESE: case NEON_2RM_AESMC:
962fcbf2 6475 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
9d935509
AB
6476 return 1;
6477 }
1a66ac61
RH
6478 ptr1 = vfp_reg_ptr(true, rd);
6479 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
6480
6481 /* Bit 6 is the lowest opcode bit; it distinguishes between
6482 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6483 */
6484 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
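 /* Note (illustrative): a single helper pair serves all four insns; the
 * bit extracted here travels to the helper as its encrypt/decrypt
 * selector, so no separate AESD/AESIMC decode case is needed.
 */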
6485
6486 if (op == NEON_2RM_AESE) {
1a66ac61 6487 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 6488 } else {
1a66ac61 6489 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 6490 }
1a66ac61
RH
6491 tcg_temp_free_ptr(ptr1);
6492 tcg_temp_free_ptr(ptr2);
9d935509
AB
6493 tcg_temp_free_i32(tmp3);
6494 break;
f1ecb913 6495 case NEON_2RM_SHA1H:
962fcbf2 6496 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
f1ecb913
AB
6497 return 1;
6498 }
1a66ac61
RH
6499 ptr1 = vfp_reg_ptr(true, rd);
6500 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 6501
1a66ac61 6502 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 6503
1a66ac61
RH
6504 tcg_temp_free_ptr(ptr1);
6505 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
6506 break;
6507 case NEON_2RM_SHA1SU1:
6508 if ((rm | rd) & 1) {
6509 return 1;
6510 }
6511 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6512 if (q) {
962fcbf2 6513 if (!dc_isar_feature(aa32_sha2, s)) {
f1ecb913
AB
6514 return 1;
6515 }
962fcbf2 6516 } else if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
6517 return 1;
6518 }
1a66ac61
RH
6519 ptr1 = vfp_reg_ptr(true, rd);
6520 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 6521 if (q) {
1a66ac61 6522 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 6523 } else {
1a66ac61 6524 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 6525 }
1a66ac61
RH
6526 tcg_temp_free_ptr(ptr1);
6527 tcg_temp_free_ptr(ptr2);
f1ecb913 6528 break;
4bf940be
RH
6529
6530 case NEON_2RM_VMVN:
6531 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
6532 break;
6533 case NEON_2RM_VNEG:
6534 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
6535 break;
4e027a71
RH
6536 case NEON_2RM_VABS:
6537 tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
6538 break;
4bf940be 6539
9ee6e8bb
PB
6540 default:
6541 elementwise:
6542 for (pass = 0; pass < (q ? 4 : 2); pass++) {
60737ed5 6543 tmp = neon_load_reg(rm, pass);
9ee6e8bb 6544 switch (op) {
600b828c 6545 case NEON_2RM_VREV32:
9ee6e8bb 6546 switch (size) {
dd8fbd78
FN
6547 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6548 case 1: gen_swap_half(tmp); break;
600b828c 6549 default: abort();
9ee6e8bb
PB
6550 }
6551 break;
600b828c 6552 case NEON_2RM_VREV16:
dd8fbd78 6553 gen_rev16(tmp);
9ee6e8bb 6554 break;
600b828c 6555 case NEON_2RM_VCLS:
9ee6e8bb 6556 switch (size) {
dd8fbd78
FN
6557 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6558 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6559 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6560 default: abort();
9ee6e8bb
PB
6561 }
6562 break;
600b828c 6563 case NEON_2RM_VCLZ:
9ee6e8bb 6564 switch (size) {
dd8fbd78
FN
6565 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6566 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 6567 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 6568 default: abort();
9ee6e8bb
PB
6569 }
6570 break;
600b828c 6571 case NEON_2RM_VCNT:
dd8fbd78 6572 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6573 break;
600b828c 6574 case NEON_2RM_VQABS:
9ee6e8bb 6575 switch (size) {
02da0b2d
PM
6576 case 0:
6577 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6578 break;
6579 case 1:
6580 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6581 break;
6582 case 2:
6583 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6584 break;
600b828c 6585 default: abort();
9ee6e8bb
PB
6586 }
6587 break;
600b828c 6588 case NEON_2RM_VQNEG:
9ee6e8bb 6589 switch (size) {
02da0b2d
PM
6590 case 0:
6591 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6592 break;
6593 case 1:
6594 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6595 break;
6596 case 2:
6597 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6598 break;
600b828c 6599 default: abort();
9ee6e8bb
PB
6600 }
6601 break;
600b828c 6602 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6603 tmp2 = tcg_const_i32(0);
9ee6e8bb 6604 switch(size) {
dd8fbd78
FN
6605 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6606 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6607 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6608 default: abort();
9ee6e8bb 6609 }
39d5492a 6610 tcg_temp_free_i32(tmp2);
600b828c 6611 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6612 tcg_gen_not_i32(tmp, tmp);
600b828c 6613 }
9ee6e8bb 6614 break;
600b828c 6615 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6616 tmp2 = tcg_const_i32(0);
9ee6e8bb 6617 switch(size) {
dd8fbd78
FN
6618 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6619 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6620 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6621 default: abort();
9ee6e8bb 6622 }
39d5492a 6623 tcg_temp_free_i32(tmp2);
600b828c 6624 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6625 tcg_gen_not_i32(tmp, tmp);
600b828c 6626 }
9ee6e8bb 6627 break;
600b828c 6628 case NEON_2RM_VCEQ0:
dd8fbd78 6629 tmp2 = tcg_const_i32(0);
9ee6e8bb 6630 switch(size) {
dd8fbd78
FN
6631 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6632 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6633 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6634 default: abort();
9ee6e8bb 6635 }
39d5492a 6636 tcg_temp_free_i32(tmp2);
9ee6e8bb 6637 break;
600b828c 6638 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6639 {
6640 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6641 tmp2 = tcg_const_i32(0);
aa47cfdd 6642 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6643 tcg_temp_free_i32(tmp2);
aa47cfdd 6644 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6645 break;
aa47cfdd 6646 }
600b828c 6647 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6648 {
6649 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6650 tmp2 = tcg_const_i32(0);
aa47cfdd 6651 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6652 tcg_temp_free_i32(tmp2);
aa47cfdd 6653 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6654 break;
aa47cfdd 6655 }
600b828c 6656 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6657 {
6658 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6659 tmp2 = tcg_const_i32(0);
aa47cfdd 6660 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6661 tcg_temp_free_i32(tmp2);
aa47cfdd 6662 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6663 break;
aa47cfdd 6664 }
600b828c 6665 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6666 {
6667 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6668 tmp2 = tcg_const_i32(0);
aa47cfdd 6669 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6670 tcg_temp_free_i32(tmp2);
aa47cfdd 6671 tcg_temp_free_ptr(fpstatus);
0e326109 6672 break;
aa47cfdd 6673 }
600b828c 6674 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6675 {
6676 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6677 tmp2 = tcg_const_i32(0);
aa47cfdd 6678 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6679 tcg_temp_free_i32(tmp2);
aa47cfdd 6680 tcg_temp_free_ptr(fpstatus);
0e326109 6681 break;
aa47cfdd 6682 }
600b828c 6683 case NEON_2RM_VABS_F:
fd8a68cd 6684 gen_helper_vfp_abss(tmp, tmp);
9ee6e8bb 6685 break;
600b828c 6686 case NEON_2RM_VNEG_F:
cedcc96f 6687 gen_helper_vfp_negs(tmp, tmp);
9ee6e8bb 6688 break;
600b828c 6689 case NEON_2RM_VSWP:
dd8fbd78
FN
6690 tmp2 = neon_load_reg(rd, pass);
6691 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6692 break;
600b828c 6693 case NEON_2RM_VTRN:
dd8fbd78 6694 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6695 switch (size) {
dd8fbd78
FN
6696 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6697 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6698 default: abort();
9ee6e8bb 6699 }
dd8fbd78 6700 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6701 break;
34f7b0a2
WN
6702 case NEON_2RM_VRINTN:
6703 case NEON_2RM_VRINTA:
6704 case NEON_2RM_VRINTM:
6705 case NEON_2RM_VRINTP:
6706 case NEON_2RM_VRINTZ:
6707 {
6708 TCGv_i32 tcg_rmode;
6709 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6710 int rmode;
6711
6712 if (op == NEON_2RM_VRINTZ) {
6713 rmode = FPROUNDING_ZERO;
6714 } else {
6715 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6716 }
6717
6718 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6719 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6720 cpu_env);
3b52ad1f 6721 gen_helper_rints(tmp, tmp, fpstatus);
34f7b0a2
WN
6722 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6723 cpu_env);
6724 tcg_temp_free_ptr(fpstatus);
6725 tcg_temp_free_i32(tcg_rmode);
6726 break;
6727 }
2ce70625
WN
6728 case NEON_2RM_VRINTX:
6729 {
6730 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
3b52ad1f 6731 gen_helper_rints_exact(tmp, tmp, fpstatus);
2ce70625
WN
6732 tcg_temp_free_ptr(fpstatus);
6733 break;
6734 }
901ad525
WN
6735 case NEON_2RM_VCVTAU:
6736 case NEON_2RM_VCVTAS:
6737 case NEON_2RM_VCVTNU:
6738 case NEON_2RM_VCVTNS:
6739 case NEON_2RM_VCVTPU:
6740 case NEON_2RM_VCVTPS:
6741 case NEON_2RM_VCVTMU:
6742 case NEON_2RM_VCVTMS:
6743 {
6744 bool is_signed = !extract32(insn, 7, 1);
6745 TCGv_ptr fpst = get_fpstatus_ptr(1);
6746 TCGv_i32 tcg_rmode, tcg_shift;
6747 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6748
6749 tcg_shift = tcg_const_i32(0);
6750 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6751 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6752 cpu_env);
6753
6754 if (is_signed) {
30bf0a01 6755 gen_helper_vfp_tosls(tmp, tmp,
901ad525
WN
6756 tcg_shift, fpst);
6757 } else {
30bf0a01 6758 gen_helper_vfp_touls(tmp, tmp,
901ad525
WN
6759 tcg_shift, fpst);
6760 }
6761
6762 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6763 cpu_env);
6764 tcg_temp_free_i32(tcg_rmode);
6765 tcg_temp_free_i32(tcg_shift);
6766 tcg_temp_free_ptr(fpst);
6767 break;
6768 }
600b828c 6769 case NEON_2RM_VRECPE:
b6d4443a
AB
6770 {
6771 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6772 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6773 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6774 break;
b6d4443a 6775 }
600b828c 6776 case NEON_2RM_VRSQRTE:
c2fb418e
AB
6777 {
6778 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6779 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6780 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6781 break;
c2fb418e 6782 }
600b828c 6783 case NEON_2RM_VRECPE_F:
b6d4443a
AB
6784 {
6785 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9a011fec 6786 gen_helper_recpe_f32(tmp, tmp, fpstatus);
b6d4443a 6787 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6788 break;
b6d4443a 6789 }
600b828c 6790 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
6791 {
6792 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9a011fec 6793 gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
c2fb418e 6794 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6795 break;
c2fb418e 6796 }
600b828c 6797 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
60737ed5
PM
6798 {
6799 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6800 gen_helper_vfp_sitos(tmp, tmp, fpstatus);
6801 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6802 break;
60737ed5 6803 }
600b828c 6804 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
60737ed5
PM
6805 {
6806 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6807 gen_helper_vfp_uitos(tmp, tmp, fpstatus);
6808 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6809 break;
60737ed5 6810 }
600b828c 6811 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
60737ed5
PM
6812 {
6813 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6814 gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
6815 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6816 break;
60737ed5 6817 }
600b828c 6818 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
60737ed5
PM
6819 {
6820 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6821 gen_helper_vfp_touizs(tmp, tmp, fpstatus);
6822 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6823 break;
60737ed5 6824 }
9ee6e8bb 6825 default:
600b828c
PM
6826 /* Reserved op values were caught by the
6827 * neon_2rm_sizes[] check earlier.
6828 */
6829 abort();
9ee6e8bb 6830 }
60737ed5 6831 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6832 }
6833 break;
6834 }
6835 } else if ((insn & (1 << 10)) == 0) {
6836 /* VTBL, VTBX. */
56907d77
PM
6837 int n = ((insn >> 8) & 3) + 1;
6838 if ((rn + n) > 32) {
6839 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6840 * helper function running off the end of the register file.
6841 */
6842 return 1;
6843 }
6844 n <<= 3;
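 /* Example (illustrative): insn bits [9:8] == 0b10 give a three-register
 * table list, so the UNPREDICTABLE check above saw n == 3 registers and
 * the shift converts that to 24 table bytes for the helper.
 */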
9ee6e8bb 6845 if (insn & (1 << 6)) {
8f8e3aa4 6846 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6847 } else {
7d1b0095 6848 tmp = tcg_temp_new_i32();
8f8e3aa4 6849 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6850 }
8f8e3aa4 6851 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 6852 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 6853 tmp5 = tcg_const_i32(n);
e7c06c4e 6854 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 6855 tcg_temp_free_i32(tmp);
9ee6e8bb 6856 if (insn & (1 << 6)) {
8f8e3aa4 6857 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6858 } else {
7d1b0095 6859 tmp = tcg_temp_new_i32();
8f8e3aa4 6860 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6861 }
8f8e3aa4 6862 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 6863 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 6864 tcg_temp_free_i32(tmp5);
e7c06c4e 6865 tcg_temp_free_ptr(ptr1);
8f8e3aa4 6866 neon_store_reg(rd, 0, tmp2);
3018f259 6867 neon_store_reg(rd, 1, tmp3);
7d1b0095 6868 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6869 } else if ((insn & 0x380) == 0) {
6870 /* VDUP */
32f91fb7
RH
6871 int element;
6872 TCGMemOp size;
6873
133da6aa
JR
6874 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6875 return 1;
6876 }
9ee6e8bb 6877 if (insn & (1 << 16)) {
32f91fb7
RH
6878 size = MO_8;
6879 element = (insn >> 17) & 7;
9ee6e8bb 6880 } else if (insn & (1 << 17)) {
32f91fb7
RH
6881 size = MO_16;
6882 element = (insn >> 18) & 3;
6883 } else {
6884 size = MO_32;
6885 element = (insn >> 19) & 1;
9ee6e8bb 6886 }
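 /* Example decode (illustrative): imm4 == 0b0101 has bit 16 set, so this
 * is a byte-sized dup of element (0b010) == 2, i.e. VDUP.8 of lane 2;
 * imm4 == 0b0010 would instead select MO_16, lane 0.
 */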
32f91fb7
RH
6887 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
6888 neon_element_offset(rm, element, size),
6889 q ? 16 : 8, q ? 16 : 8);
9ee6e8bb
PB
6890 } else {
6891 return 1;
6892 }
6893 }
6894 }
6895 return 0;
6896}
6897
8b7209fa
RH
6898/* Advanced SIMD three registers of the same length extension.
6899 * 31 25 23 22 20 16 12 11 10 9 8 3 0
6900 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
6901 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
6902 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
6903 */
6904static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
6905{
26c470a7
RH
6906 gen_helper_gvec_3 *fn_gvec = NULL;
6907 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
6908 int rd, rn, rm, opr_sz;
6909 int data = 0;
87732318
RH
6910 int off_rn, off_rm;
6911 bool is_long = false, q = extract32(insn, 6, 1);
6912 bool ptr_is_env = false;
8b7209fa
RH
6913
6914 if ((insn & 0xfe200f10) == 0xfc200800) {
6915 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
6916 int size = extract32(insn, 20, 1);
6917 data = extract32(insn, 23, 2); /* rot */
962fcbf2 6918 if (!dc_isar_feature(aa32_vcma, s)
5763190f 6919 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
6920 return 1;
6921 }
6922 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
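 /* Note (illustrative): the two rot bits select the rotation in
 * multiples of 90 degrees, e.g. rot == 0b01 denotes VCMLA #90; the raw
 * value is passed through to the gvec helper as 'data'.
 */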
6923 } else if ((insn & 0xfea00f10) == 0xfc800800) {
6924 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
6925 int size = extract32(insn, 20, 1);
6926 data = extract32(insn, 24, 1); /* rot */
962fcbf2 6927 if (!dc_isar_feature(aa32_vcma, s)
5763190f 6928 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
6929 return 1;
6930 }
6931 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
6932 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
6933 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
6934 bool u = extract32(insn, 4, 1);
962fcbf2 6935 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
6936 return 1;
6937 }
6938 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
87732318
RH
6939 } else if ((insn & 0xff300f10) == 0xfc200810) {
6940 /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
6941 int is_s = extract32(insn, 23, 1);
6942 if (!dc_isar_feature(aa32_fhm, s)) {
6943 return 1;
6944 }
6945 is_long = true;
6946 data = is_s; /* is_2 == 0 */
6947 fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
6948 ptr_is_env = true;
8b7209fa
RH
6949 } else {
6950 return 1;
6951 }
6952
87732318
RH
6953 VFP_DREG_D(rd, insn);
6954 if (rd & q) {
6955 return 1;
6956 }
6957 if (q || !is_long) {
6958 VFP_DREG_N(rn, insn);
6959 VFP_DREG_M(rm, insn);
6960 if ((rn | rm) & q & !is_long) {
6961 return 1;
6962 }
6963 off_rn = vfp_reg_offset(1, rn);
6964 off_rm = vfp_reg_offset(1, rm);
6965 } else {
6966 rn = VFP_SREG_N(insn);
6967 rm = VFP_SREG_M(insn);
6968 off_rn = vfp_reg_offset(0, rn);
6969 off_rm = vfp_reg_offset(0, rm);
6970 }
6971
8b7209fa
RH
6972 if (s->fp_excp_el) {
6973 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 6974 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8b7209fa
RH
6975 return 0;
6976 }
6977 if (!s->vfp_enabled) {
6978 return 1;
6979 }
6980
6981 opr_sz = (1 + q) * 8;
26c470a7 6982 if (fn_gvec_ptr) {
87732318
RH
6983 TCGv_ptr ptr;
6984 if (ptr_is_env) {
6985 ptr = cpu_env;
6986 } else {
6987 ptr = get_fpstatus_ptr(1);
6988 }
6989 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 6990 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
6991 if (!ptr_is_env) {
6992 tcg_temp_free_ptr(ptr);
6993 }
26c470a7 6994 } else {
87732318 6995 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
6996 opr_sz, opr_sz, data, fn_gvec);
6997 }
8b7209fa
RH
6998 return 0;
6999}
7000
638808ff
RH
7001/* Advanced SIMD two registers and a scalar extension.
7002 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7003 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7004 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7005 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7006 *
7007 */
7008
7009static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7010{
26c470a7
RH
7011 gen_helper_gvec_3 *fn_gvec = NULL;
7012 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
2cc99919 7013 int rd, rn, rm, opr_sz, data;
87732318
RH
7014 int off_rn, off_rm;
7015 bool is_long = false, q = extract32(insn, 6, 1);
7016 bool ptr_is_env = false;
638808ff
RH
7017
7018 if ((insn & 0xff000f10) == 0xfe000800) {
7019 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
2cc99919
RH
7020 int rot = extract32(insn, 20, 2);
7021 int size = extract32(insn, 23, 1);
7022 int index;
7023
962fcbf2 7024 if (!dc_isar_feature(aa32_vcma, s)) {
638808ff
RH
7025 return 1;
7026 }
2cc99919 7027 if (size == 0) {
5763190f 7028 if (!dc_isar_feature(aa32_fp16_arith, s)) {
2cc99919
RH
7029 return 1;
7030 }
7031 /* For fp16, rm is just Vm, and index is M. */
7032 rm = extract32(insn, 0, 4);
7033 index = extract32(insn, 5, 1);
7034 } else {
7035 /* For fp32, rm is the usual M:Vm, and index is 0. */
7036 VFP_DREG_M(rm, insn);
7037 index = 0;
7038 }
7039 data = (index << 2) | rot;
7040 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
7041 : gen_helper_gvec_fcmlah_idx);
26c470a7
RH
7042 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
7043 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
7044 int u = extract32(insn, 4, 1);
87732318 7045
962fcbf2 7046 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
7047 return 1;
7048 }
7049 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
7050 /* rm is just Vm, and index is M. */
7051 data = extract32(insn, 5, 1); /* index */
7052 rm = extract32(insn, 0, 4);
87732318
RH
7053 } else if ((insn & 0xffa00f10) == 0xfe000810) {
7054 /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
7055 int is_s = extract32(insn, 20, 1);
7056 int vm20 = extract32(insn, 0, 3);
7057 int vm3 = extract32(insn, 3, 1);
7058 int m = extract32(insn, 5, 1);
7059 int index;
7060
7061 if (!dc_isar_feature(aa32_fhm, s)) {
7062 return 1;
7063 }
7064 if (q) {
7065 rm = vm20;
7066 index = m * 2 + vm3;
7067 } else {
7068 rm = vm20 * 2 + m;
7069 index = vm3;
7070 }
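 /* Worked example (an assumed reading of the encoding): in the Q == 1
 * form, vm20 == 5, m == 1, vm3 == 0 select fp16 element 2 of double
 * register 5; in the Q == 0 form the same fields name single register
 * vm20 * 2 + m == 11 and element vm3 == 0.
 */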
7071 is_long = true;
7072 data = (index << 2) | is_s; /* is_2 == 0 */
7073 fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
7074 ptr_is_env = true;
638808ff
RH
7075 } else {
7076 return 1;
7077 }
7078
87732318
RH
7079 VFP_DREG_D(rd, insn);
7080 if (rd & q) {
7081 return 1;
7082 }
7083 if (q || !is_long) {
7084 VFP_DREG_N(rn, insn);
7085 if (rn & q & !is_long) {
7086 return 1;
7087 }
7088 off_rn = vfp_reg_offset(1, rn);
7089 off_rm = vfp_reg_offset(1, rm);
7090 } else {
7091 rn = VFP_SREG_N(insn);
7092 off_rn = vfp_reg_offset(0, rn);
7093 off_rm = vfp_reg_offset(0, rm);
7094 }
638808ff
RH
7095 if (s->fp_excp_el) {
7096 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 7097 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
7098 return 0;
7099 }
7100 if (!s->vfp_enabled) {
7101 return 1;
7102 }
7103
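 /* opr_sz is the vector length in bytes: 8 for a D register (Q == 0),
 * 16 for a Q register; it is passed to the gvec expander as both the
 * operation size and the maximum size.
 */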
7104 opr_sz = (1 + q) * 8;
26c470a7 7105 if (fn_gvec_ptr) {
7106 TCGv_ptr ptr;
7107 if (ptr_is_env) {
7108 ptr = cpu_env;
7109 } else {
7110 ptr = get_fpstatus_ptr(1);
7111 }
7112 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 7113 opr_sz, opr_sz, data, fn_gvec_ptr);
7114 if (!ptr_is_env) {
7115 tcg_temp_free_ptr(ptr);
7116 }
26c470a7 7117 } else {
87732318 7118 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
7119 opr_sz, opr_sz, data, fn_gvec);
7120 }
7121 return 0;
7122}
7123
7dcc1f89 7124static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 7125{
7126 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7127 const ARMCPRegInfo *ri;
7128
7129 cpnum = (insn >> 8) & 0xf;
7130
7131 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 7132 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7133 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7134 return 1;
7135 }
d614a513 7136 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 7137 return disas_iwmmxt_insn(s, insn);
d614a513 7138 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 7139 return disas_dsp_insn(s, insn);
7140 }
7141 return 1;
7142 }
7143
7144 /* Otherwise treat as a generic register access */
7145 is64 = (insn & (1 << 25)) == 0;
7146 if (!is64 && ((insn & (1 << 4)) == 0)) {
7147 /* cdp */
7148 return 1;
7149 }
7150
7151 crm = insn & 0xf;
7152 if (is64) {
7153 crn = 0;
7154 opc1 = (insn >> 4) & 0xf;
7155 opc2 = 0;
7156 rt2 = (insn >> 16) & 0xf;
7157 } else {
7158 crn = (insn >> 16) & 0xf;
7159 opc1 = (insn >> 21) & 7;
7160 opc2 = (insn >> 5) & 7;
7161 rt2 = 0;
7162 }
7163 isread = (insn >> 20) & 1;
7164 rt = (insn >> 12) & 0xf;
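 /* These fields correspond to the usual assembly forms:
 *   MCR/MRC   p<cpnum>, <opc1>, <Rt>, <CRn>, <CRm>, <opc2>  (is64 == 0)
 *   MCRR/MRRC p<cpnum>, <opc1>, <Rt>, <Rt2>, <CRm>          (is64 == 1)
 * with isread set for the MRC/MRRC (register read) directions.
 */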
7165
60322b39 7166 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 7167 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7168 if (ri) {
7169 /* Check access permissions */
dcbff19b 7170 if (!cp_access_ok(s->current_el, ri, isread)) {
7171 return 1;
7172 }
7173
c0f4af17 7174 if (ri->accessfn ||
d614a513 7175 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7176 /* Emit code to perform further access permissions checks at
7177 * runtime; this may result in an exception.
7178 * Note that on XScale all cp0..c13 registers do an access check
7179 * call in order to handle c15_cpar.
7180 */
7181 TCGv_ptr tmpptr;
3f208fd7 7182 TCGv_i32 tcg_syn, tcg_isread;
7183 uint32_t syndrome;
7184
7185 /* Note that since we are an implementation which takes an
7186 * exception on a trapped conditional instruction only if the
7187 * instruction passes its condition code check, we can take
7188 * advantage of the clause in the ARM ARM that allows us to set
7189 * the COND field in the instruction to 0xE in all cases.
7190 * We could fish the actual condition out of the insn (ARM)
7191 * or the condexec bits (Thumb) but it isn't necessary.
7192 */
7193 switch (cpnum) {
7194 case 14:
7195 if (is64) {
7196 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7197 isread, false);
7198 } else {
7199 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7200 rt, isread, false);
7201 }
7202 break;
7203 case 15:
7204 if (is64) {
7205 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7206 isread, false);
7207 } else {
7208 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7209 rt, isread, false);
7210 }
7211 break;
7212 default:
7213 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7214 * so this can only happen if this is an ARMv7 or earlier CPU,
7215 * in which case the syndrome information won't actually be
7216 * guest visible.
7217 */
d614a513 7218 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7219 syndrome = syn_uncategorized();
7220 break;
7221 }
7222
43bfa4a1 7223 gen_set_condexec(s);
43722a6d 7224 gen_set_pc_im(s, s->pc_curr);
f59df3f2 7225 tmpptr = tcg_const_ptr(ri);
8bcbf37c 7226 tcg_syn = tcg_const_i32(syndrome);
7227 tcg_isread = tcg_const_i32(isread);
7228 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7229 tcg_isread);
f59df3f2 7230 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7231 tcg_temp_free_i32(tcg_syn);
3f208fd7 7232 tcg_temp_free_i32(tcg_isread);
7233 }
7234
7235 /* Handle special cases first */
7236 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7237 case ARM_CP_NOP:
7238 return 0;
7239 case ARM_CP_WFI:
7240 if (isread) {
7241 return 1;
7242 }
eaed129d 7243 gen_set_pc_im(s, s->pc);
dcba3a8d 7244 s->base.is_jmp = DISAS_WFI;
2bee5105 7245 return 0;
7246 default:
7247 break;
7248 }
7249
c5a49c63 7250 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7251 gen_io_start();
7252 }
7253
7254 if (isread) {
7255 /* Read */
7256 if (is64) {
7257 TCGv_i64 tmp64;
7258 TCGv_i32 tmp;
7259 if (ri->type & ARM_CP_CONST) {
7260 tmp64 = tcg_const_i64(ri->resetvalue);
7261 } else if (ri->readfn) {
7262 TCGv_ptr tmpptr;
7263 tmp64 = tcg_temp_new_i64();
7264 tmpptr = tcg_const_ptr(ri);
7265 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7266 tcg_temp_free_ptr(tmpptr);
7267 } else {
7268 tmp64 = tcg_temp_new_i64();
7269 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7270 }
7271 tmp = tcg_temp_new_i32();
ecc7b3aa 7272 tcg_gen_extrl_i64_i32(tmp, tmp64);
7273 store_reg(s, rt, tmp);
7274 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7275 tmp = tcg_temp_new_i32();
ecc7b3aa 7276 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 7277 tcg_temp_free_i64(tmp64);
7278 store_reg(s, rt2, tmp);
7279 } else {
39d5492a 7280 TCGv_i32 tmp;
7281 if (ri->type & ARM_CP_CONST) {
7282 tmp = tcg_const_i32(ri->resetvalue);
7283 } else if (ri->readfn) {
7284 TCGv_ptr tmpptr;
7285 tmp = tcg_temp_new_i32();
7286 tmpptr = tcg_const_ptr(ri);
7287 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7288 tcg_temp_free_ptr(tmpptr);
7289 } else {
7290 tmp = load_cpu_offset(ri->fieldoffset);
7291 }
7292 if (rt == 15) {
7293 /* A destination register of r15 for 32-bit loads sets
7294 * the condition codes from the high 4 bits of the value.
7295 */
7296 gen_set_nzcv(tmp);
7297 tcg_temp_free_i32(tmp);
7298 } else {
7299 store_reg(s, rt, tmp);
7300 }
7301 }
7302 } else {
7303 /* Write */
7304 if (ri->type & ARM_CP_CONST) {
7305 /* If not forbidden by access permissions, treat as WI */
7306 return 0;
7307 }
7308
7309 if (is64) {
39d5492a 7310 TCGv_i32 tmplo, tmphi;
7311 TCGv_i64 tmp64 = tcg_temp_new_i64();
7312 tmplo = load_reg(s, rt);
7313 tmphi = load_reg(s, rt2);
7314 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7315 tcg_temp_free_i32(tmplo);
7316 tcg_temp_free_i32(tmphi);
7317 if (ri->writefn) {
7318 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7319 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7320 tcg_temp_free_ptr(tmpptr);
7321 } else {
7322 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7323 }
7324 tcg_temp_free_i64(tmp64);
7325 } else {
7326 if (ri->writefn) {
39d5492a 7327 TCGv_i32 tmp;
4b6a83fb 7328 TCGv_ptr tmpptr;
7329 tmp = load_reg(s, rt);
7330 tmpptr = tcg_const_ptr(ri);
7331 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7332 tcg_temp_free_ptr(tmpptr);
7333 tcg_temp_free_i32(tmp);
7334 } else {
39d5492a 7335 TCGv_i32 tmp = load_reg(s, rt);
7336 store_cpu_offset(tmp, ri->fieldoffset);
7337 }
7338 }
7339 }
7340
c5a49c63 7341 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7342 /* I/O operations must end the TB here (whether read or write) */
7343 gen_io_end();
7344 gen_lookup_tb(s);
7345 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7346 /* We default to ending the TB on a coprocessor register write,
7347 * but allow this to be suppressed by the register definition
7348 * (usually only necessary to work around guest bugs).
7349 */
2452731c 7350 gen_lookup_tb(s);
4b6a83fb 7351 }
2452731c 7352
7353 return 0;
7354 }
7355
7356 /* Unknown register; this might be a guest error or a QEMU
7357 * unimplemented feature.
7358 */
7359 if (is64) {
7360 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7361 "64 bit system register cp:%d opc1: %d crm:%d "
7362 "(%s)\n",
7363 isread ? "read" : "write", cpnum, opc1, crm,
7364 s->ns ? "non-secure" : "secure");
7365 } else {
7366 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7367 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7368 "(%s)\n",
7369 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7370 s->ns ? "non-secure" : "secure");
7371 }
7372
4a9a539f 7373 return 1;
7374}
7375
7376
7377/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7378static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7379{
39d5492a 7380 TCGv_i32 tmp;
7d1b0095 7381 tmp = tcg_temp_new_i32();
ecc7b3aa 7382 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7383 store_reg(s, rlow, tmp);
7d1b0095 7384 tmp = tcg_temp_new_i32();
5e3f878a 7385 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7386 tcg_gen_extrl_i64_i32(tmp, val);
7387 store_reg(s, rhigh, tmp);
7388}
7389
7390/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7391static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7392{
a7812ae4 7393 TCGv_i64 tmp;
39d5492a 7394 TCGv_i32 tmp2;
5e3f878a 7395
36aa55dc 7396 /* Load value and extend to 64 bits. */
a7812ae4 7397 tmp = tcg_temp_new_i64();
7398 tmp2 = load_reg(s, rlow);
7399 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7400 tcg_temp_free_i32(tmp2);
5e3f878a 7401 tcg_gen_add_i64(val, val, tmp);
b75263d6 7402 tcg_temp_free_i64(tmp);
7403}
7404
7405/* load and add a 64-bit value from a register pair. */
a7812ae4 7406static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7407{
a7812ae4 7408 TCGv_i64 tmp;
7409 TCGv_i32 tmpl;
7410 TCGv_i32 tmph;
7411
7412 /* Load 64-bit value rd:rn. */
7413 tmpl = load_reg(s, rlow);
7414 tmph = load_reg(s, rhigh);
a7812ae4 7415 tmp = tcg_temp_new_i64();
36aa55dc 7416 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7417 tcg_temp_free_i32(tmpl);
7418 tcg_temp_free_i32(tmph);
5e3f878a 7419 tcg_gen_add_i64(val, val, tmp);
b75263d6 7420 tcg_temp_free_i64(tmp);
7421}
7422
c9f10124 7423/* Set N and Z flags from hi|lo. */
39d5492a 7424static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7425{
7426 tcg_gen_mov_i32(cpu_NF, hi);
7427 tcg_gen_or_i32(cpu_ZF, lo, hi);
7428}
7429
7430/* Load/Store exclusive instructions are implemented by remembering
7431 the value/address loaded, and seeing if these are the same
354161b3 7432 when the store is performed. This should be sufficient to implement
426f5abc 7433 the architecturally mandated semantics, and avoids having to monitor
7434 regular stores. The compare vs the remembered value is done during
7435 the cmpxchg operation, but we must compare the addresses manually. */
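/* For example (illustrative guest code, not from this file):
 *     ldrex r0, [r1]
 *     strex r2, r0, [r1]
 * The ldrex records r1 in cpu_exclusive_addr and the loaded data in
 * cpu_exclusive_val; the strex then succeeds (writing 0 to r2) only if
 * the address still matches and the cmpxchg sees the remembered value.
 */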
426f5abc 7436static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 7437 TCGv_i32 addr, int size)
426f5abc 7438{
94ee24e7 7439 TCGv_i32 tmp = tcg_temp_new_i32();
354161b3 7440 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 7441
7442 s->is_ldex = true;
7443
426f5abc 7444 if (size == 3) {
39d5492a 7445 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 7446 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 7447
7448 /* For AArch32, architecturally the 32-bit word at the lowest
7449 * address is always Rt and the one at addr+4 is Rt2, even if
7450 * the CPU is big-endian. That means we don't want to do a
7451 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
7452 * for an architecturally 64-bit access, but instead do a
7453 * 64-bit access using MO_BE if appropriate and then split
7454 * the two halves.
7455 * This only makes a difference for BE32 user-mode, where
7456 * frob64() must not flip the two halves of the 64-bit data
7457 * but this code must treat BE32 user-mode like BE32 system.
7458 */
7459 TCGv taddr = gen_aa32_addr(s, addr, opc);
7460
7461 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
7462 tcg_temp_free(taddr);
354161b3 7463 tcg_gen_mov_i64(cpu_exclusive_val, t64);
7464 if (s->be_data == MO_BE) {
7465 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
7466 } else {
7467 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
7468 }
7469 tcg_temp_free_i64(t64);
7470
7471 store_reg(s, rt2, tmp2);
03d05e2d 7472 } else {
354161b3 7473 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 7474 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 7475 }
7476
7477 store_reg(s, rt, tmp);
7478 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7479}
7480
7481static void gen_clrex(DisasContext *s)
7482{
03d05e2d 7483 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7484}
7485
426f5abc 7486static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7487 TCGv_i32 addr, int size)
426f5abc 7488{
7489 TCGv_i32 t0, t1, t2;
7490 TCGv_i64 extaddr;
7491 TCGv taddr;
7492 TCGLabel *done_label;
7493 TCGLabel *fail_label;
354161b3 7494 TCGMemOp opc = size | MO_ALIGN | s->be_data;
7495
7496 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7497 [addr] = {Rt};
7498 {Rd} = 0;
7499 } else {
7500 {Rd} = 1;
7501 } */
7502 fail_label = gen_new_label();
7503 done_label = gen_new_label();
7504 extaddr = tcg_temp_new_i64();
7505 tcg_gen_extu_i32_i64(extaddr, addr);
7506 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7507 tcg_temp_free_i64(extaddr);
7508
7509 taddr = gen_aa32_addr(s, addr, opc);
7510 t0 = tcg_temp_new_i32();
7511 t1 = load_reg(s, rt);
426f5abc 7512 if (size == 3) {
7513 TCGv_i64 o64 = tcg_temp_new_i64();
7514 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 7515
354161b3 7516 t2 = load_reg(s, rt2);
7517 /* For AArch32, architecturally the 32-bit word at the lowest
7518 * address is always Rt and the one at addr+4 is Rt2, even if
7519 * the CPU is big-endian. Since we're going to treat this as a
7520 * single 64-bit BE store, we need to put the two halves in the
7521 * opposite order for BE to LE, so that they end up in the right
7522 * places.
7523 * We don't want gen_aa32_frob64() because that does the wrong
7524 * thing for BE32 usermode.
7525 */
7526 if (s->be_data == MO_BE) {
7527 tcg_gen_concat_i32_i64(n64, t2, t1);
7528 } else {
7529 tcg_gen_concat_i32_i64(n64, t1, t2);
7530 }
354161b3 7531 tcg_temp_free_i32(t2);
03d05e2d 7532
7533 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
7534 get_mem_index(s), opc);
7535 tcg_temp_free_i64(n64);
7536
7537 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
7538 tcg_gen_extrl_i64_i32(t0, o64);
7539
7540 tcg_temp_free_i64(o64);
7541 } else {
7542 t2 = tcg_temp_new_i32();
7543 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
7544 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
7545 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
7546 tcg_temp_free_i32(t2);
426f5abc 7547 }
7548 tcg_temp_free_i32(t1);
7549 tcg_temp_free(taddr);
7550 tcg_gen_mov_i32(cpu_R[rd], t0);
7551 tcg_temp_free_i32(t0);
426f5abc 7552 tcg_gen_br(done_label);
354161b3 7553
7554 gen_set_label(fail_label);
7555 tcg_gen_movi_i32(cpu_R[rd], 1);
7556 gen_set_label(done_label);
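 /* Whichever path was taken, the exclusive monitor is now clear:
 * -1 is the "no outstanding exclusive access" marker, as in gen_clrex().
 */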
03d05e2d 7557 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 7558}
426f5abc 7559
7560/* gen_srs:
7561 * @env: CPUARMState
7562 * @s: DisasContext
7563 * @mode: mode field from insn (which stack to store to)
7564 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7565 * @writeback: true if writeback bit set
7566 *
7567 * Generate code for the SRS (Store Return State) insn.
7568 */
7569static void gen_srs(DisasContext *s,
7570 uint32_t mode, uint32_t amode, bool writeback)
7571{
7572 int32_t offset;
7573 TCGv_i32 addr, tmp;
7574 bool undef = false;
7575
7576 /* SRS is:
7577 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 7578 * and specified mode is monitor mode
7579 * - UNDEFINED in Hyp mode
7580 * - UNPREDICTABLE in User or System mode
7581 * - UNPREDICTABLE if the specified mode is:
7582 * -- not implemented
7583 * -- not a valid mode number
7584 * -- a mode that's at a higher exception level
7585 * -- Monitor, if we are Non-secure
f01377f5 7586 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 7587 */
ba63cf47 7588 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
7589 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
7590 return;
7591 }
7592
7593 if (s->current_el == 0 || s->current_el == 2) {
7594 undef = true;
7595 }
7596
7597 switch (mode) {
7598 case ARM_CPU_MODE_USR:
7599 case ARM_CPU_MODE_FIQ:
7600 case ARM_CPU_MODE_IRQ:
7601 case ARM_CPU_MODE_SVC:
7602 case ARM_CPU_MODE_ABT:
7603 case ARM_CPU_MODE_UND:
7604 case ARM_CPU_MODE_SYS:
7605 break;
7606 case ARM_CPU_MODE_HYP:
7607 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7608 undef = true;
7609 }
7610 break;
7611 case ARM_CPU_MODE_MON:
7612 /* No need to check specifically for "are we non-secure" because
7613 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7614 * so if this isn't EL3 then we must be non-secure.
7615 */
7616 if (s->current_el != 3) {
7617 undef = true;
7618 }
7619 break;
7620 default:
7621 undef = true;
7622 }
7623
7624 if (undef) {
7625 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
7626 default_exception_el(s));
7627 return;
7628 }
7629
7630 addr = tcg_temp_new_i32();
7631 tmp = tcg_const_i32(mode);
7632 /* get_r13_banked() will raise an exception if called from System mode */
7633 gen_set_condexec(s);
43722a6d 7634 gen_set_pc_im(s, s->pc_curr);
7635 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7636 tcg_temp_free_i32(tmp);
7637 switch (amode) {
7638 case 0: /* DA */
7639 offset = -4;
7640 break;
7641 case 1: /* IA */
7642 offset = 0;
7643 break;
7644 case 2: /* DB */
7645 offset = -8;
7646 break;
7647 case 3: /* IB */
7648 offset = 4;
7649 break;
7650 default:
7651 abort();
7652 }
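 /* The offset selected above points at the lower of the two words to be
 * stored (LR, then SPSR at +4), relative to the banked SP:
 * sp-4 for DA, sp for IA, sp-8 for DB, sp+4 for IB.
 */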
7653 tcg_gen_addi_i32(addr, addr, offset);
7654 tmp = load_reg(s, 14);
12dcc321 7655 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 7656 tcg_temp_free_i32(tmp);
7657 tmp = load_cpu_field(spsr);
7658 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 7659 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 7660 tcg_temp_free_i32(tmp);
7661 if (writeback) {
7662 switch (amode) {
7663 case 0:
7664 offset = -8;
7665 break;
7666 case 1:
7667 offset = 4;
7668 break;
7669 case 2:
7670 offset = -4;
7671 break;
7672 case 3:
7673 offset = 0;
7674 break;
7675 default:
7676 abort();
7677 }
7678 tcg_gen_addi_i32(addr, addr, offset);
7679 tmp = tcg_const_i32(mode);
7680 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7681 tcg_temp_free_i32(tmp);
7682 }
7683 tcg_temp_free_i32(addr);
dcba3a8d 7684 s->base.is_jmp = DISAS_UPDATE;
7685}
7686
7687/* Generate a label used for skipping this instruction */
7688static void arm_gen_condlabel(DisasContext *s)
7689{
7690 if (!s->condjmp) {
7691 s->condlabel = gen_new_label();
7692 s->condjmp = 1;
7693 }
7694}
7695
7696/* Skip this instruction if the ARM condition is false */
7697static void arm_skip_unless(DisasContext *s, uint32_t cond)
7698{
7699 arm_gen_condlabel(s);
7700 arm_gen_test_cc(cond ^ 1, s->condlabel);
7701}
7702
f4df2210 7703static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 7704{
f4df2210 7705 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
7706 TCGv_i32 tmp;
7707 TCGv_i32 tmp2;
7708 TCGv_i32 tmp3;
7709 TCGv_i32 addr;
a7812ae4 7710 TCGv_i64 tmp64;
9ee6e8bb 7711
7712 /* M variants do not implement ARM mode; this must raise the INVSTATE
7713 * UsageFault exception.
7714 */
b53d8923 7715 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7716 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
7717 default_exception_el(s));
7718 return;
b53d8923 7719 }
7720 cond = insn >> 28;
7721 if (cond == 0xf){
7722 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7723 * choose to UNDEF. In ARMv5 and above the space is used
7724 * for miscellaneous unconditional instructions.
7725 */
7726 ARCH(5);
7727
7728 /* Unconditional instructions. */
7729 if (((insn >> 25) & 7) == 1) {
7730 /* NEON Data processing. */
d614a513 7731 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7732 goto illegal_op;
d614a513 7733 }
9ee6e8bb 7734
7dcc1f89 7735 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 7736 goto illegal_op;
7dcc1f89 7737 }
7738 return;
7739 }
7740 if ((insn & 0x0f100000) == 0x04000000) {
7741 /* NEON load/store. */
d614a513 7742 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7743 goto illegal_op;
d614a513 7744 }
9ee6e8bb 7745
7dcc1f89 7746 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 7747 goto illegal_op;
7dcc1f89 7748 }
7749 return;
7750 }
7751 if ((insn & 0x0f000e10) == 0x0e000a00) {
7752 /* VFP. */
7dcc1f89 7753 if (disas_vfp_insn(s, insn)) {
7754 goto illegal_op;
7755 }
7756 return;
7757 }
7758 if (((insn & 0x0f30f000) == 0x0510f000) ||
7759 ((insn & 0x0f30f010) == 0x0710f000)) {
7760 if ((insn & (1 << 22)) == 0) {
7761 /* PLDW; v7MP */
d614a513 7762 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7763 goto illegal_op;
7764 }
7765 }
7766 /* Otherwise PLD; v5TE+ */
be5e7a76 7767 ARCH(5TE);
7768 return;
7769 }
7770 if (((insn & 0x0f70f000) == 0x0450f000) ||
7771 ((insn & 0x0f70f010) == 0x0650f000)) {
7772 ARCH(7);
7773 return; /* PLI; V7 */
7774 }
7775 if (((insn & 0x0f700000) == 0x04100000) ||
7776 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 7777 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7778 goto illegal_op;
7779 }
7780 return; /* v7MP: Unallocated memory hint: must NOP */
7781 }
7782
7783 if ((insn & 0x0ffffdff) == 0x01010000) {
7784 ARCH(6);
7785 /* setend */
7786 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
7787 gen_helper_setend(cpu_env);
dcba3a8d 7788 s->base.is_jmp = DISAS_UPDATE;
7789 }
7790 return;
7791 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7792 switch ((insn >> 4) & 0xf) {
7793 case 1: /* clrex */
7794 ARCH(6K);
426f5abc 7795 gen_clrex(s);
7796 return;
7797 case 4: /* dsb */
7798 case 5: /* dmb */
9ee6e8bb 7799 ARCH(7);
61e4c432 7800 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 7801 return;
7802 case 6: /* isb */
7803 /* We need to break the TB after this insn to execute
7804 * self-modifying code correctly and also to take
7805 * any pending interrupts immediately.
7806 */
4818c374 7807 gen_goto_tb(s, 0, s->pc);
6df99dec 7808 return;
7809 case 7: /* sb */
7810 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
7811 goto illegal_op;
7812 }
7813 /*
7814 * TODO: There is no speculation barrier opcode
7815 * for TCG; MB and end the TB instead.
7816 */
7817 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
4818c374 7818 gen_goto_tb(s, 0, s->pc);
9888bd1e 7819 return;
7820 default:
7821 goto illegal_op;
7822 }
7823 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7824 /* srs */
7825 ARCH(6);
7826 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7827 return;
ea825eee 7828 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7829 /* rfe */
c67b6b71 7830 int32_t offset;
7831 if (IS_USER(s))
7832 goto illegal_op;
7833 ARCH(6);
7834 rn = (insn >> 16) & 0xf;
b0109805 7835 addr = load_reg(s, rn);
7836 i = (insn >> 23) & 3;
7837 switch (i) {
b0109805 7838 case 0: offset = -4; break; /* DA */
7839 case 1: offset = 0; break; /* IA */
7840 case 2: offset = -8; break; /* DB */
b0109805 7841 case 3: offset = 4; break; /* IB */
7842 default: abort();
7843 }
7844 if (offset)
7845 tcg_gen_addi_i32(addr, addr, offset);
7846 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7847 tmp = tcg_temp_new_i32();
12dcc321 7848 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 7849 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7850 tmp2 = tcg_temp_new_i32();
12dcc321 7851 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
7852 if (insn & (1 << 21)) {
7853 /* Base writeback. */
7854 switch (i) {
b0109805 7855 case 0: offset = -8; break;
7856 case 1: offset = 4; break;
7857 case 2: offset = -4; break;
b0109805 7858 case 3: offset = 0; break;
7859 default: abort();
7860 }
7861 if (offset)
7862 tcg_gen_addi_i32(addr, addr, offset);
7863 store_reg(s, rn, addr);
7864 } else {
7d1b0095 7865 tcg_temp_free_i32(addr);
9ee6e8bb 7866 }
b0109805 7867 gen_rfe(s, tmp, tmp2);
c67b6b71 7868 return;
7869 } else if ((insn & 0x0e000000) == 0x0a000000) {
7870 /* branch link and change to thumb (blx <offset>) */
7871 int32_t offset;
7872
7d1b0095 7873 tmp = tcg_temp_new_i32();
fdbcf632 7874 tcg_gen_movi_i32(tmp, s->pc);
d9ba4830 7875 store_reg(s, 14, tmp);
7876 /* Sign-extend the 24-bit offset */
7877 offset = (((int32_t)insn) << 8) >> 8;
fdbcf632 7878 val = read_pc(s);
7879 /* offset * 4 + bit24 * 2 + (thumb bit) */
7880 val += (offset << 2) | ((insn >> 23) & 2) | 1;
be5e7a76 7881 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7882 gen_bx_im(s, val);
7883 return;
7884 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 7885 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 7886 /* iWMMXt register transfer. */
c0f4af17 7887 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 7888 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 7889 return;
7890 }
7891 }
9ee6e8bb 7892 }
7893 } else if ((insn & 0x0e000a00) == 0x0c000800
7894 && arm_dc_feature(s, ARM_FEATURE_V8)) {
7895 if (disas_neon_insn_3same_ext(s, insn)) {
7896 goto illegal_op;
7897 }
7898 return;
7899 } else if ((insn & 0x0f000a00) == 0x0e000800
7900 && arm_dc_feature(s, ARM_FEATURE_V8)) {
7901 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
7902 goto illegal_op;
7903 }
7904 return;
7905 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7906 /* Coprocessor double register transfer. */
be5e7a76 7907 ARCH(5TE);
7908 } else if ((insn & 0x0f000010) == 0x0e000010) {
7909 /* Additional coprocessor register transfer. */
7997d92f 7910 } else if ((insn & 0x0ff10020) == 0x01000000) {
7911 uint32_t mask;
7912 uint32_t val;
7913 /* cps (privileged) */
7914 if (IS_USER(s))
7915 return;
7916 mask = val = 0;
7917 if (insn & (1 << 19)) {
7918 if (insn & (1 << 8))
7919 mask |= CPSR_A;
7920 if (insn & (1 << 7))
7921 mask |= CPSR_I;
7922 if (insn & (1 << 6))
7923 mask |= CPSR_F;
7924 if (insn & (1 << 18))
7925 val |= mask;
7926 }
7997d92f 7927 if (insn & (1 << 17)) {
7928 mask |= CPSR_M;
7929 val |= (insn & 0x1f);
7930 }
7931 if (mask) {
2fbac54b 7932 gen_set_psr_im(s, mask, 0, val);
7933 }
7934 return;
7935 }
7936 goto illegal_op;
7937 }
7938 if (cond != 0xe) {
7939 /* if not always execute, we generate a conditional jump to
7940 next instruction */
c2d9644e 7941 arm_skip_unless(s, cond);
7942 }
7943 if ((insn & 0x0f900000) == 0x03000000) {
7944 if ((insn & (1 << 21)) == 0) {
7945 ARCH(6T2);
7946 rd = (insn >> 12) & 0xf;
7947 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
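 /* The 16-bit immediate is split across insn bits [19:16] and [11:0].
 * MOVW zero-extends it into rd; MOVT replaces only the top halfword,
 * so a MOVW/MOVT pair builds an arbitrary 32-bit constant, e.g.
 *     movw r0, #0x5678 ; movt r0, #0x1234   -> r0 == 0x12345678
 */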
7948 if ((insn & (1 << 22)) == 0) {
7949 /* MOVW */
7d1b0095 7950 tmp = tcg_temp_new_i32();
5e3f878a 7951 tcg_gen_movi_i32(tmp, val);
7952 } else {
7953 /* MOVT */
5e3f878a 7954 tmp = load_reg(s, rd);
86831435 7955 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7956 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7957 }
5e3f878a 7958 store_reg(s, rd, tmp);
7959 } else {
7960 if (((insn >> 12) & 0xf) != 0xf)
7961 goto illegal_op;
7962 if (((insn >> 16) & 0xf) == 0) {
7963 gen_nop_hint(s, insn & 0xff);
7964 } else {
7965 /* CPSR = immediate */
7966 val = insn & 0xff;
7967 shift = ((insn >> 8) & 0xf) * 2;
7968 if (shift)
7969 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 7970 i = ((insn & (1 << 22)) != 0);
7971 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7972 i, val)) {
9ee6e8bb 7973 goto illegal_op;
7dcc1f89 7974 }
7975 }
7976 }
7977 } else if ((insn & 0x0f900000) == 0x01000000
7978 && (insn & 0x00000090) != 0x00000090) {
7979 /* miscellaneous instructions */
7980 op1 = (insn >> 21) & 3;
7981 sh = (insn >> 4) & 0xf;
7982 rm = insn & 0xf;
7983 switch (sh) {
7984 case 0x0: /* MSR, MRS */
7985 if (insn & (1 << 9)) {
7986 /* MSR (banked) and MRS (banked) */
7987 int sysm = extract32(insn, 16, 4) |
7988 (extract32(insn, 8, 1) << 4);
7989 int r = extract32(insn, 22, 1);
7990
7991 if (op1 & 1) {
7992 /* MSR (banked) */
7993 gen_msr_banked(s, r, sysm, rm);
7994 } else {
7995 /* MRS (banked) */
7996 int rd = extract32(insn, 12, 4);
7997
7998 gen_mrs_banked(s, r, sysm, rd);
7999 }
8000 break;
8001 }
8002
8003 /* MSR, MRS (for PSRs) */
8004 if (op1 & 1) {
8005 /* PSR = reg */
2fbac54b 8006 tmp = load_reg(s, rm);
9ee6e8bb 8007 i = ((op1 & 2) != 0);
7dcc1f89 8008 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
8009 goto illegal_op;
8010 } else {
8011 /* reg = PSR */
8012 rd = (insn >> 12) & 0xf;
8013 if (op1 & 2) {
8014 if (IS_USER(s))
8015 goto illegal_op;
d9ba4830 8016 tmp = load_cpu_field(spsr);
9ee6e8bb 8017 } else {
7d1b0095 8018 tmp = tcg_temp_new_i32();
9ef39277 8019 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8020 }
d9ba4830 8021 store_reg(s, rd, tmp);
8022 }
8023 break;
8024 case 0x1:
8025 if (op1 == 1) {
8026 /* branch/exchange thumb (bx). */
be5e7a76 8027 ARCH(4T);
8028 tmp = load_reg(s, rm);
8029 gen_bx(s, tmp);
8030 } else if (op1 == 3) {
8031 /* clz */
be5e7a76 8032 ARCH(5);
9ee6e8bb 8033 rd = (insn >> 12) & 0xf;
1497c961 8034 tmp = load_reg(s, rm);
7539a012 8035 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8036 store_reg(s, rd, tmp);
8037 } else {
8038 goto illegal_op;
8039 }
8040 break;
8041 case 0x2:
8042 if (op1 == 1) {
8043 ARCH(5J); /* bxj */
8044 /* Trivial implementation equivalent to bx. */
8045 tmp = load_reg(s, rm);
8046 gen_bx(s, tmp);
8047 } else {
8048 goto illegal_op;
8049 }
8050 break;
8051 case 0x3:
8052 if (op1 != 1)
8053 goto illegal_op;
8054
be5e7a76 8055 ARCH(5);
9ee6e8bb 8056 /* branch link/exchange thumb (blx) */
d9ba4830 8057 tmp = load_reg(s, rm);
7d1b0095 8058 tmp2 = tcg_temp_new_i32();
8059 tcg_gen_movi_i32(tmp2, s->pc);
8060 store_reg(s, 14, tmp2);
8061 gen_bx(s, tmp);
9ee6e8bb 8062 break;
8063 case 0x4:
8064 {
8065 /* crc32/crc32c */
8066 uint32_t c = extract32(insn, 8, 4);
8067
8068 /* Check this CPU supports ARMv8 CRC instructions.
8069 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8070 * Bits 8, 10 and 11 should be zero.
8071 */
962fcbf2 8072 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
8073 goto illegal_op;
8074 }
8075
8076 rn = extract32(insn, 16, 4);
8077 rd = extract32(insn, 12, 4);
8078
8079 tmp = load_reg(s, rn);
8080 tmp2 = load_reg(s, rm);
8081 if (op1 == 0) {
8082 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8083 } else if (op1 == 1) {
8084 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8085 }
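 /* 1 << op1 is the operand width in bytes (1, 2 or 4); bit 9 of the
 * insn (c & 0x2) selects the CRC-32C (Castagnoli) polynomial instead
 * of the IEEE one.
 */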
8086 tmp3 = tcg_const_i32(1 << op1);
8087 if (c & 0x2) {
8088 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8089 } else {
8090 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8091 }
8092 tcg_temp_free_i32(tmp2);
8093 tcg_temp_free_i32(tmp3);
8094 store_reg(s, rd, tmp);
8095 break;
8096 }
9ee6e8bb 8097 case 0x5: /* saturating add/subtract */
be5e7a76 8098 ARCH(5TE);
8099 rd = (insn >> 12) & 0xf;
8100 rn = (insn >> 16) & 0xf;
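 /* op1 bit 1 selects the doubling forms and bit 0 subtraction, giving
 * QADD, QSUB, QDADD and QDSUB; the doubling itself also saturates,
 * hence the double_saturate helper below.
 */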
b40d0353 8101 tmp = load_reg(s, rm);
5e3f878a 8102 tmp2 = load_reg(s, rn);
9ee6e8bb 8103 if (op1 & 2)
9ef39277 8104 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8105 if (op1 & 1)
9ef39277 8106 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8107 else
9ef39277 8108 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8109 tcg_temp_free_i32(tmp2);
5e3f878a 8110 store_reg(s, rd, tmp);
9ee6e8bb 8111 break;
8112 case 0x6: /* ERET */
8113 if (op1 != 3) {
8114 goto illegal_op;
8115 }
8116 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
8117 goto illegal_op;
8118 }
8119 if ((insn & 0x000fff0f) != 0x0000000e) {
8120 /* UNPREDICTABLE; we choose to UNDEF */
8121 goto illegal_op;
8122 }
8123
8124 if (s->current_el == 2) {
8125 tmp = load_cpu_field(elr_el[2]);
8126 } else {
8127 tmp = load_reg(s, 14);
8128 }
8129 gen_exception_return(s, tmp);
8130 break;
49e14940 8131 case 7:
8132 {
8133 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8134 switch (op1) {
8135 case 0:
8136 /* HLT */
8137 gen_hlt(s, imm16);
8138 break;
8139 case 1:
8140 /* bkpt */
8141 ARCH(5);
c900a2e6 8142 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
8143 break;
8144 case 2:
8145 /* Hypervisor call (v7) */
8146 ARCH(7);
8147 if (IS_USER(s)) {
8148 goto illegal_op;
8149 }
8150 gen_hvc(s, imm16);
8151 break;
8152 case 3:
8153 /* Secure monitor call (v6+) */
8154 ARCH(6K);
8155 if (IS_USER(s)) {
8156 goto illegal_op;
8157 }
8158 gen_smc(s);
8159 break;
8160 default:
19a6e31c 8161 g_assert_not_reached();
49e14940 8162 }
9ee6e8bb 8163 break;
d4a2dc67 8164 }
8165 case 0x8: /* signed multiply */
8166 case 0xa:
8167 case 0xc:
8168 case 0xe:
be5e7a76 8169 ARCH(5TE);
8170 rs = (insn >> 8) & 0xf;
8171 rn = (insn >> 12) & 0xf;
8172 rd = (insn >> 16) & 0xf;
8173 if (op1 == 1) {
8174 /* (32 * 16) >> 16 */
8175 tmp = load_reg(s, rm);
8176 tmp2 = load_reg(s, rs);
9ee6e8bb 8177 if (sh & 4)
5e3f878a 8178 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8179 else
5e3f878a 8180 gen_sxth(tmp2);
8181 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8182 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8183 tmp = tcg_temp_new_i32();
ecc7b3aa 8184 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8185 tcg_temp_free_i64(tmp64);
9ee6e8bb 8186 if ((sh & 2) == 0) {
5e3f878a 8187 tmp2 = load_reg(s, rn);
9ef39277 8188 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8189 tcg_temp_free_i32(tmp2);
9ee6e8bb 8190 }
5e3f878a 8191 store_reg(s, rd, tmp);
8192 } else {
8193 /* 16 * 16 */
8194 tmp = load_reg(s, rm);
8195 tmp2 = load_reg(s, rs);
8196 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8197 tcg_temp_free_i32(tmp2);
9ee6e8bb 8198 if (op1 == 2) {
8199 tmp64 = tcg_temp_new_i64();
8200 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8201 tcg_temp_free_i32(tmp);
8202 gen_addq(s, tmp64, rn, rd);
8203 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8204 tcg_temp_free_i64(tmp64);
8205 } else {
8206 if (op1 == 0) {
5e3f878a 8207 tmp2 = load_reg(s, rn);
9ef39277 8208 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8209 tcg_temp_free_i32(tmp2);
9ee6e8bb 8210 }
5e3f878a 8211 store_reg(s, rd, tmp);
8212 }
8213 }
8214 break;
8215 default:
8216 goto illegal_op;
8217 }
8218 } else if (((insn & 0x0e000000) == 0 &&
8219 (insn & 0x00000090) != 0x90) ||
8220 ((insn & 0x0e000000) == (1 << 25))) {
8221 int set_cc, logic_cc, shiftop;
8222
8223 op1 = (insn >> 21) & 0xf;
8224 set_cc = (insn >> 20) & 1;
8225 logic_cc = table_logic_cc[op1] & set_cc;
8226
8227 /* data processing instruction */
8228 if (insn & (1 << 25)) {
8229 /* immediate operand */
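 /* A32 modified immediate: an 8-bit constant rotated right by twice
 * the 4-bit rotate field.
 */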
8230 val = insn & 0xff;
8231 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8232 if (shift) {
9ee6e8bb 8233 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8234 }
7d1b0095 8235 tmp2 = tcg_temp_new_i32();
8236 tcg_gen_movi_i32(tmp2, val);
8237 if (logic_cc && shift) {
8238 gen_set_CF_bit31(tmp2);
8239 }
8240 } else {
8241 /* register */
8242 rm = (insn) & 0xf;
e9bb4aa9 8243 tmp2 = load_reg(s, rm);
8244 shiftop = (insn >> 5) & 3;
8245 if (!(insn & (1 << 4))) {
8246 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8247 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8248 } else {
8249 rs = (insn >> 8) & 0xf;
8984bd2e 8250 tmp = load_reg(s, rs);
e9bb4aa9 8251 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8252 }
8253 }
8254 if (op1 != 0x0f && op1 != 0x0d) {
8255 rn = (insn >> 16) & 0xf;
8256 tmp = load_reg(s, rn);
8257 } else {
f764718d 8258 tmp = NULL;
8259 }
8260 rd = (insn >> 12) & 0xf;
8261 switch(op1) {
8262 case 0x00:
8263 tcg_gen_and_i32(tmp, tmp, tmp2);
8264 if (logic_cc) {
8265 gen_logic_CC(tmp);
8266 }
7dcc1f89 8267 store_reg_bx(s, rd, tmp);
8268 break;
8269 case 0x01:
8270 tcg_gen_xor_i32(tmp, tmp, tmp2);
8271 if (logic_cc) {
8272 gen_logic_CC(tmp);
8273 }
7dcc1f89 8274 store_reg_bx(s, rd, tmp);
8275 break;
8276 case 0x02:
8277 if (set_cc && rd == 15) {
8278 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8279 if (IS_USER(s)) {
9ee6e8bb 8280 goto illegal_op;
e9bb4aa9 8281 }
72485ec4 8282 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8283 gen_exception_return(s, tmp);
9ee6e8bb 8284 } else {
e9bb4aa9 8285 if (set_cc) {
72485ec4 8286 gen_sub_CC(tmp, tmp, tmp2);
8287 } else {
8288 tcg_gen_sub_i32(tmp, tmp, tmp2);
8289 }
7dcc1f89 8290 store_reg_bx(s, rd, tmp);
8291 }
8292 break;
8293 case 0x03:
e9bb4aa9 8294 if (set_cc) {
72485ec4 8295 gen_sub_CC(tmp, tmp2, tmp);
8296 } else {
8297 tcg_gen_sub_i32(tmp, tmp2, tmp);
8298 }
7dcc1f89 8299 store_reg_bx(s, rd, tmp);
8300 break;
8301 case 0x04:
e9bb4aa9 8302 if (set_cc) {
72485ec4 8303 gen_add_CC(tmp, tmp, tmp2);
8304 } else {
8305 tcg_gen_add_i32(tmp, tmp, tmp2);
8306 }
7dcc1f89 8307 store_reg_bx(s, rd, tmp);
8308 break;
8309 case 0x05:
e9bb4aa9 8310 if (set_cc) {
49b4c31e 8311 gen_adc_CC(tmp, tmp, tmp2);
8312 } else {
8313 gen_add_carry(tmp, tmp, tmp2);
8314 }
7dcc1f89 8315 store_reg_bx(s, rd, tmp);
8316 break;
8317 case 0x06:
e9bb4aa9 8318 if (set_cc) {
2de68a49 8319 gen_sbc_CC(tmp, tmp, tmp2);
8320 } else {
8321 gen_sub_carry(tmp, tmp, tmp2);
8322 }
7dcc1f89 8323 store_reg_bx(s, rd, tmp);
8324 break;
8325 case 0x07:
e9bb4aa9 8326 if (set_cc) {
2de68a49 8327 gen_sbc_CC(tmp, tmp2, tmp);
8328 } else {
8329 gen_sub_carry(tmp, tmp2, tmp);
8330 }
7dcc1f89 8331 store_reg_bx(s, rd, tmp);
8332 break;
8333 case 0x08:
8334 if (set_cc) {
8335 tcg_gen_and_i32(tmp, tmp, tmp2);
8336 gen_logic_CC(tmp);
9ee6e8bb 8337 }
7d1b0095 8338 tcg_temp_free_i32(tmp);
8339 break;
8340 case 0x09:
8341 if (set_cc) {
8342 tcg_gen_xor_i32(tmp, tmp, tmp2);
8343 gen_logic_CC(tmp);
9ee6e8bb 8344 }
7d1b0095 8345 tcg_temp_free_i32(tmp);
8346 break;
8347 case 0x0a:
8348 if (set_cc) {
72485ec4 8349 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8350 }
7d1b0095 8351 tcg_temp_free_i32(tmp);
8352 break;
8353 case 0x0b:
8354 if (set_cc) {
72485ec4 8355 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8356 }
7d1b0095 8357 tcg_temp_free_i32(tmp);
8358 break;
8359 case 0x0c:
8360 tcg_gen_or_i32(tmp, tmp, tmp2);
8361 if (logic_cc) {
8362 gen_logic_CC(tmp);
8363 }
7dcc1f89 8364 store_reg_bx(s, rd, tmp);
8365 break;
8366 case 0x0d:
8367 if (logic_cc && rd == 15) {
8368 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8369 if (IS_USER(s)) {
9ee6e8bb 8370 goto illegal_op;
8371 }
8372 gen_exception_return(s, tmp2);
9ee6e8bb 8373 } else {
8374 if (logic_cc) {
8375 gen_logic_CC(tmp2);
8376 }
7dcc1f89 8377 store_reg_bx(s, rd, tmp2);
8378 }
8379 break;
8380 case 0x0e:
f669df27 8381 tcg_gen_andc_i32(tmp, tmp, tmp2);
8382 if (logic_cc) {
8383 gen_logic_CC(tmp);
8384 }
7dcc1f89 8385 store_reg_bx(s, rd, tmp);
8386 break;
8387 default:
8388 case 0x0f:
8389 tcg_gen_not_i32(tmp2, tmp2);
8390 if (logic_cc) {
8391 gen_logic_CC(tmp2);
8392 }
7dcc1f89 8393 store_reg_bx(s, rd, tmp2);
8394 break;
8395 }
e9bb4aa9 8396 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8397 tcg_temp_free_i32(tmp2);
e9bb4aa9 8398 }
8399 } else {
8400 /* other instructions */
8401 op1 = (insn >> 24) & 0xf;
8402 switch(op1) {
8403 case 0x0:
8404 case 0x1:
8405 /* multiplies, extra load/stores */
8406 sh = (insn >> 5) & 3;
8407 if (sh == 0) {
8408 if (op1 == 0x0) {
8409 rd = (insn >> 16) & 0xf;
8410 rn = (insn >> 12) & 0xf;
8411 rs = (insn >> 8) & 0xf;
8412 rm = (insn) & 0xf;
8413 op1 = (insn >> 20) & 0xf;
8414 switch (op1) {
8415 case 0: case 1: case 2: case 3: case 6:
8416 /* 32 bit mul */
8417 tmp = load_reg(s, rs);
8418 tmp2 = load_reg(s, rm);
8419 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8420 tcg_temp_free_i32(tmp2);
8421 if (insn & (1 << 22)) {
8422 /* Subtract (mls) */
8423 ARCH(6T2);
8424 tmp2 = load_reg(s, rn);
8425 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8426 tcg_temp_free_i32(tmp2);
8427 } else if (insn & (1 << 21)) {
8428 /* Add */
8429 tmp2 = load_reg(s, rn);
8430 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8431 tcg_temp_free_i32(tmp2);
8432 }
8433 if (insn & (1 << 20))
8434 gen_logic_CC(tmp);
8435 store_reg(s, rd, tmp);
9ee6e8bb 8436 break;
8437 case 4:
8438 /* 64 bit mul double accumulate (UMAAL) */
8439 ARCH(6);
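 /* UMAAL: rd:rn = rs * rm + rn + rd. Adding the two 32-bit
 * accumulators separately cannot overflow the 64-bit result.
 */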
8440 tmp = load_reg(s, rs);
8441 tmp2 = load_reg(s, rm);
8442 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8443 gen_addq_lo(s, tmp64, rn);
8444 gen_addq_lo(s, tmp64, rd);
8445 gen_storeq_reg(s, rn, rd, tmp64);
8446 tcg_temp_free_i64(tmp64);
8447 break;
8448 case 8: case 9: case 10: case 11:
8449 case 12: case 13: case 14: case 15:
8450 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8451 tmp = load_reg(s, rs);
8452 tmp2 = load_reg(s, rm);
8aac08b1 8453 if (insn & (1 << 22)) {
c9f10124 8454 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8455 } else {
c9f10124 8456 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8457 }
8458 if (insn & (1 << 21)) { /* mult accumulate */
8459 TCGv_i32 al = load_reg(s, rn);
8460 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8461 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8462 tcg_temp_free_i32(al);
8463 tcg_temp_free_i32(ah);
9ee6e8bb 8464 }
8aac08b1 8465 if (insn & (1 << 20)) {
c9f10124 8466 gen_logicq_cc(tmp, tmp2);
8aac08b1 8467 }
8468 store_reg(s, rn, tmp);
8469 store_reg(s, rd, tmp2);
9ee6e8bb 8470 break;
8471 default:
8472 goto illegal_op;
8473 }
8474 } else {
8475 rn = (insn >> 16) & 0xf;
8476 rd = (insn >> 12) & 0xf;
8477 if (insn & (1 << 23)) {
8478 /* load/store exclusive */
8479 bool is_ld = extract32(insn, 20, 1);
8480 bool is_lasr = !extract32(insn, 8, 1);
2359bf80 8481 int op2 = (insn >> 8) & 3;
86753403 8482 op1 = (insn >> 21) & 0x3;
8483
8484 switch (op2) {
8485 case 0: /* lda/stl */
8486 if (op1 == 1) {
8487 goto illegal_op;
8488 }
8489 ARCH(8);
8490 break;
8491 case 1: /* reserved */
8492 goto illegal_op;
8493 case 2: /* ldaex/stlex */
8494 ARCH(8);
8495 break;
8496 case 3: /* ldrex/strex */
8497 if (op1) {
8498 ARCH(6K);
8499 } else {
8500 ARCH(6);
8501 }
8502 break;
8503 }
8504
3174f8e9 8505 addr = tcg_temp_local_new_i32();
98a46317 8506 load_reg_var(s, addr, rn);
2359bf80 8507
8508 if (is_lasr && !is_ld) {
8509 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8510 }
8511
2359bf80 8512 if (op2 == 0) {
96c55295 8513 if (is_ld) {
8514 tmp = tcg_temp_new_i32();
8515 switch (op1) {
8516 case 0: /* lda */
8517 gen_aa32_ld32u_iss(s, tmp, addr,
8518 get_mem_index(s),
8519 rd | ISSIsAcqRel);
8520 break;
8521 case 2: /* ldab */
8522 gen_aa32_ld8u_iss(s, tmp, addr,
8523 get_mem_index(s),
8524 rd | ISSIsAcqRel);
8525 break;
8526 case 3: /* ldah */
8527 gen_aa32_ld16u_iss(s, tmp, addr,
8528 get_mem_index(s),
8529 rd | ISSIsAcqRel);
8530 break;
8531 default:
8532 abort();
8533 }
8534 store_reg(s, rd, tmp);
8535 } else {
8536 rm = insn & 0xf;
8537 tmp = load_reg(s, rm);
8538 switch (op1) {
8539 case 0: /* stl */
8540 gen_aa32_st32_iss(s, tmp, addr,
8541 get_mem_index(s),
8542 rm | ISSIsAcqRel);
8543 break;
8544 case 2: /* stlb */
8545 gen_aa32_st8_iss(s, tmp, addr,
8546 get_mem_index(s),
8547 rm | ISSIsAcqRel);
8548 break;
8549 case 3: /* stlh */
8550 gen_aa32_st16_iss(s, tmp, addr,
8551 get_mem_index(s),
8552 rm | ISSIsAcqRel);
8553 break;
8554 default:
8555 abort();
8556 }
8557 tcg_temp_free_i32(tmp);
8558 }
96c55295 8559 } else if (is_ld) {
8560 switch (op1) {
8561 case 0: /* ldrex */
426f5abc 8562 gen_load_exclusive(s, rd, 15, addr, 2);
8563 break;
8564 case 1: /* ldrexd */
426f5abc 8565 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8566 break;
8567 case 2: /* ldrexb */
426f5abc 8568 gen_load_exclusive(s, rd, 15, addr, 0);
8569 break;
8570 case 3: /* ldrexh */
426f5abc 8571 gen_load_exclusive(s, rd, 15, addr, 1);
8572 break;
8573 default:
8574 abort();
8575 }
8576 } else {
8577 rm = insn & 0xf;
8578 switch (op1) {
8579 case 0: /* strex */
426f5abc 8580 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8581 break;
8582 case 1: /* strexd */
502e64fe 8583 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8584 break;
8585 case 2: /* strexb */
426f5abc 8586 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8587 break;
8588 case 3: /* strexh */
426f5abc 8589 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8590 break;
8591 default:
8592 abort();
8593 }
9ee6e8bb 8594 }
39d5492a 8595 tcg_temp_free_i32(addr);
8596
8597 if (is_lasr && is_ld) {
8598 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
8599 }
8600 } else if ((insn & 0x00300f00) == 0) {
8601 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
8602 * - SWP, SWPB
8603 */
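 /* The deprecated SWP/SWPB are implemented here as a single atomic
 * exchange, which keeps them atomic even when multiple translated
 * CPUs run in parallel.
 */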
8604
8605 TCGv taddr;
8606 TCGMemOp opc = s->be_data;
8607
8608 rm = (insn) & 0xf;
8609
9ee6e8bb 8610 if (insn & (1 << 22)) {
cf12bce0 8611 opc |= MO_UB;
9ee6e8bb 8612 } else {
cf12bce0 8613 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 8614 }
8615
8616 addr = load_reg(s, rn);
8617 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 8618 tcg_temp_free_i32(addr);
8619
8620 tmp = load_reg(s, rm);
8621 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8622 get_mem_index(s), opc);
8623 tcg_temp_free(taddr);
8624 store_reg(s, rd, tmp);
8625 } else {
8626 goto illegal_op;
8627 }
8628 }
8629 } else {
8630 int address_offset;
3960c336 8631 bool load = insn & (1 << 20);
8632 bool wbit = insn & (1 << 21);
8633 bool pbit = insn & (1 << 24);
3960c336 8634 bool doubleword = false;
8635 ISSInfo issinfo;
8636
8637 /* Misc load/store */
8638 rn = (insn >> 16) & 0xf;
8639 rd = (insn >> 12) & 0xf;
3960c336 8640
8641 /* ISS not valid if writeback */
8642 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
8643
8644 if (!load && (sh & 2)) {
8645 /* doubleword */
8646 ARCH(5TE);
8647 if (rd & 1) {
8648 /* UNPREDICTABLE; we choose to UNDEF */
8649 goto illegal_op;
8650 }
8651 load = (sh & 1) == 0;
8652 doubleword = true;
8653 }
8654
b0109805 8655 addr = load_reg(s, rn);
63f26fcf 8656 if (pbit) {
b0109805 8657 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 8658 }
9ee6e8bb 8659 address_offset = 0;
8660
8661 if (doubleword) {
8662 if (!load) {
9ee6e8bb 8663 /* store */
b0109805 8664 tmp = load_reg(s, rd);
12dcc321 8665 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8666 tcg_temp_free_i32(tmp);
8667 tcg_gen_addi_i32(addr, addr, 4);
8668 tmp = load_reg(s, rd + 1);
12dcc321 8669 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8670 tcg_temp_free_i32(tmp);
8671 } else {
8672 /* load */
5a839c0d 8673 tmp = tcg_temp_new_i32();
12dcc321 8674 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8675 store_reg(s, rd, tmp);
8676 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8677 tmp = tcg_temp_new_i32();
12dcc321 8678 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8679 rd++;
8680 }
8681 address_offset = -4;
8682 } else if (load) {
8683 /* load */
8684 tmp = tcg_temp_new_i32();
8685 switch (sh) {
8686 case 1:
8687 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
8688 issinfo);
8689 break;
8690 case 2:
8691 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
8692 issinfo);
8693 break;
8694 default:
8695 case 3:
8696 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
8697 issinfo);
8698 break;
8699 }
8700 } else {
8701 /* store */
b0109805 8702 tmp = load_reg(s, rd);
9bb6558a 8703 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 8704 tcg_temp_free_i32(tmp);
8705 }
8706 /* Perform base writeback before storing the loaded value, to
8707 ensure correct behavior with overlapping index registers;
8708 ldrd with base writeback is undefined if the
8709 destination and index registers overlap. */
63f26fcf 8710 if (!pbit) {
8711 gen_add_datah_offset(s, insn, address_offset, addr);
8712 store_reg(s, rn, addr);
63f26fcf 8713 } else if (wbit) {
9ee6e8bb 8714 if (address_offset)
8715 tcg_gen_addi_i32(addr, addr, address_offset);
8716 store_reg(s, rn, addr);
8717 } else {
7d1b0095 8718 tcg_temp_free_i32(addr);
8719 }
8720 if (load) {
8721 /* Complete the load. */
b0109805 8722 store_reg(s, rd, tmp);
8723 }
8724 }
8725 break;
8726 case 0x4:
8727 case 0x5:
8728 goto do_ldst;
8729 case 0x6:
8730 case 0x7:
8731 if (insn & (1 << 4)) {
8732 ARCH(6);
8733 /* Armv6 Media instructions. */
8734 rm = insn & 0xf;
8735 rn = (insn >> 16) & 0xf;
2c0262af 8736 rd = (insn >> 12) & 0xf;
8737 rs = (insn >> 8) & 0xf;
8738 switch ((insn >> 23) & 3) {
8739 case 0: /* Parallel add/subtract. */
8740 op1 = (insn >> 20) & 7;
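 /* op1 selects the variant prefix (signed, saturating, halving,
 * unsigned, ...) and sh the operation (ADD16/ASX/SAX/SUB16/ADD8/SUB8);
 * the combinations rejected below are unallocated encodings.
 */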
8741 tmp = load_reg(s, rn);
8742 tmp2 = load_reg(s, rm);
8743 sh = (insn >> 5) & 7;
8744 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8745 goto illegal_op;
6ddbc6e4 8746 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8747 tcg_temp_free_i32(tmp2);
6ddbc6e4 8748 store_reg(s, rd, tmp);
8749 break;
8750 case 1:
8751 if ((insn & 0x00700020) == 0) {
6c95676b 8752 /* Halfword pack. */
8753 tmp = load_reg(s, rn);
8754 tmp2 = load_reg(s, rm);
9ee6e8bb 8755 shift = (insn >> 7) & 0x1f;
8756 if (insn & (1 << 6)) {
8757 /* pkhtb */
8758 if (shift == 0)
8759 shift = 31;
8760 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8761 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8762 tcg_gen_ext16u_i32(tmp2, tmp2);
8763 } else {
8764 /* pkhbt */
8765 if (shift)
8766 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8767 tcg_gen_ext16u_i32(tmp, tmp);
8768 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8769 }
8770 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8771 tcg_temp_free_i32(tmp2);
3670669c 8772 store_reg(s, rd, tmp);
8773 } else if ((insn & 0x00200020) == 0x00200000) {
8774 /* [us]sat */
6ddbc6e4 8775 tmp = load_reg(s, rm);
8776 shift = (insn >> 7) & 0x1f;
8777 if (insn & (1 << 6)) {
8778 if (shift == 0)
8779 shift = 31;
6ddbc6e4 8780 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8781 } else {
6ddbc6e4 8782 tcg_gen_shli_i32(tmp, tmp, shift);
8783 }
8784 sh = (insn >> 16) & 0x1f;
8785 tmp2 = tcg_const_i32(sh);
8786 if (insn & (1 << 22))
9ef39277 8787 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8788 else
9ef39277 8789 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8790 tcg_temp_free_i32(tmp2);
6ddbc6e4 8791 store_reg(s, rd, tmp);
8792 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8793 /* [us]sat16 */
6ddbc6e4 8794 tmp = load_reg(s, rm);
9ee6e8bb 8795 sh = (insn >> 16) & 0x1f;
8796 tmp2 = tcg_const_i32(sh);
8797 if (insn & (1 << 22))
9ef39277 8798 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8799 else
9ef39277 8800 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8801 tcg_temp_free_i32(tmp2);
6ddbc6e4 8802 store_reg(s, rd, tmp);
8803 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8804 /* Select bytes. */
8805 tmp = load_reg(s, rn);
8806 tmp2 = load_reg(s, rm);
7d1b0095 8807 tmp3 = tcg_temp_new_i32();
0ecb72a5 8808 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8809 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8810 tcg_temp_free_i32(tmp3);
8811 tcg_temp_free_i32(tmp2);
6ddbc6e4 8812 store_reg(s, rd, tmp);
9ee6e8bb 8813 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8814 tmp = load_reg(s, rm);
9ee6e8bb 8815 shift = (insn >> 10) & 3;
1301f322 8816 /* ??? In many cases it's not necessary to do a
8817 rotate, a shift is sufficient. */
8818 if (shift != 0)
f669df27 8819 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8820 op1 = (insn >> 20) & 7;
8821 switch (op1) {
8822 case 0: gen_sxtb16(tmp); break;
8823 case 2: gen_sxtb(tmp); break;
8824 case 3: gen_sxth(tmp); break;
8825 case 4: gen_uxtb16(tmp); break;
8826 case 6: gen_uxtb(tmp); break;
8827 case 7: gen_uxth(tmp); break;
8828 default: goto illegal_op;
8829 }
8830 if (rn != 15) {
5e3f878a 8831 tmp2 = load_reg(s, rn);
9ee6e8bb 8832 if ((op1 & 3) == 0) {
5e3f878a 8833 gen_add16(tmp, tmp2);
9ee6e8bb 8834 } else {
5e3f878a 8835 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8836 tcg_temp_free_i32(tmp2);
8837 }
8838 }
6c95676b 8839 store_reg(s, rd, tmp);
8840 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8841 /* rev */
b0109805 8842 tmp = load_reg(s, rm);
8843 if (insn & (1 << 22)) {
8844 if (insn & (1 << 7)) {
b0109805 8845 gen_revsh(tmp);
8846 } else {
8847 ARCH(6T2);
b0109805 8848 gen_helper_rbit(tmp, tmp);
8849 }
8850 } else {
8851 if (insn & (1 << 7))
b0109805 8852 gen_rev16(tmp);
9ee6e8bb 8853 else
66896cb8 8854 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8855 }
b0109805 8856 store_reg(s, rd, tmp);
8857 } else {
8858 goto illegal_op;
8859 }
8860 break;
8861 case 2: /* Multiplies (Type 3). */
8862 switch ((insn >> 20) & 0x7) {
8863 case 5:
8864 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8865 /* op2 not 00x or 11x : UNDEF */
8866 goto illegal_op;
8867 }
8868 /* Signed multiply most significant [accumulate].
8869 (SMMUL, SMMLA, SMMLS) */
8870 tmp = load_reg(s, rm);
8871 tmp2 = load_reg(s, rs);
a7812ae4 8872 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8873
955a7dd5 8874 if (rd != 15) {
838fa72d 8875 tmp = load_reg(s, rd);
9ee6e8bb 8876 if (insn & (1 << 6)) {
838fa72d 8877 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8878 } else {
838fa72d 8879 tmp64 = gen_addq_msw(tmp64, tmp);
8880 }
8881 }
8882 if (insn & (1 << 5)) {
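 /* 'R' variant: bias by 0x80000000 so that the shift right by 32
 * below rounds to nearest instead of truncating.
 */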
8883 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8884 }
8885 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8886 tmp = tcg_temp_new_i32();
ecc7b3aa 8887 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 8888 tcg_temp_free_i64(tmp64);
955a7dd5 8889 store_reg(s, rn, tmp);
8890 break;
8891 case 0:
8892 case 4:
8893 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8894 if (insn & (1 << 7)) {
8895 goto illegal_op;
8896 }
8897 tmp = load_reg(s, rm);
8898 tmp2 = load_reg(s, rs);
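 /* Bit 5 is the 'X' (cross) bit: swap the halfwords of the second
 * operand before the dual 16x16 multiplies.
 */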
9ee6e8bb 8899 if (insn & (1 << 5))
8900 gen_swap_half(tmp2);
8901 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8902 if (insn & (1 << 22)) {
5e3f878a 8903 /* smlald, smlsld */
8904 TCGv_i64 tmp64_2;
8905
a7812ae4 8906 tmp64 = tcg_temp_new_i64();
33bbd75a 8907 tmp64_2 = tcg_temp_new_i64();
a7812ae4 8908 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 8909 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 8910 tcg_temp_free_i32(tmp);
8911 tcg_temp_free_i32(tmp2);
8912 if (insn & (1 << 6)) {
8913 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8914 } else {
8915 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8916 }
8917 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
8918 gen_addq(s, tmp64, rd, rn);
8919 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8920 tcg_temp_free_i64(tmp64);
9ee6e8bb 8921 } else {
5e3f878a 8922 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
8923 if (insn & (1 << 6)) {
8924 /* This subtraction cannot overflow. */
8925 tcg_gen_sub_i32(tmp, tmp, tmp2);
8926 } else {
8927 /* This addition cannot overflow 32 bits;
8928 * however it may overflow considered as a
8929 * signed operation, in which case we must set
8930 * the Q flag.
8931 */
8932 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8933 }
8934 tcg_temp_free_i32(tmp2);
22478e79 8935 if (rd != 15)
9ee6e8bb 8936 {
22478e79 8937 tmp2 = load_reg(s, rd);
9ef39277 8938 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8939 tcg_temp_free_i32(tmp2);
9ee6e8bb 8940 }
22478e79 8941 store_reg(s, rn, tmp);
9ee6e8bb 8942 }
41e9564d 8943 break;
b8b8ea05
PM
8944 case 1:
8945 case 3:
8946 /* SDIV, UDIV */
7e0cf8b4 8947 if (!dc_isar_feature(arm_div, s)) {
b8b8ea05
PM
8948 goto illegal_op;
8949 }
8950 if (((insn >> 5) & 7) || (rd != 15)) {
8951 goto illegal_op;
8952 }
8953 tmp = load_reg(s, rm);
8954 tmp2 = load_reg(s, rs);
8955 if (insn & (1 << 21)) {
8956 gen_helper_udiv(tmp, tmp, tmp2);
8957 } else {
8958 gen_helper_sdiv(tmp, tmp, tmp2);
8959 }
8960 tcg_temp_free_i32(tmp2);
8961 store_reg(s, rn, tmp);
8962 break;
41e9564d
PM
8963 default:
8964 goto illegal_op;
9ee6e8bb
PB
8965 }
8966 break;
8967 case 3:
8968 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8969 switch (op1) {
8970 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8971 ARCH(6);
8972 tmp = load_reg(s, rm);
8973 tmp2 = load_reg(s, rs);
8974 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8975 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8976 if (rd != 15) {
8977 tmp2 = load_reg(s, rd);
6ddbc6e4 8978 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8979 tcg_temp_free_i32(tmp2);
9ee6e8bb 8980 }
ded9d295 8981 store_reg(s, rn, tmp);
9ee6e8bb
PB
8982 break;
8983 case 0x20: case 0x24: case 0x28: case 0x2c:
8984 /* Bitfield insert/clear. */
8985 ARCH(6T2);
8986 shift = (insn >> 7) & 0x1f;
8987 i = (insn >> 16) & 0x1f;
45140a57
KB
8988 if (i < shift) {
8989 /* UNPREDICTABLE; we choose to UNDEF */
8990 goto illegal_op;
8991 }
9ee6e8bb
PB
8992 i = i + 1 - shift;
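                        /*
                         * i arrives as the msb field; rewriting it as the
                         * field width (msb - lsb + 1) lets tcg_gen_deposit_i32
                         * below perform the insert, with rm == 15 treated
                         * as BFC (depositing zeroes).
                         */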
                        if (rm == 15) {
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, 0);
                        } else {
                            tmp = load_reg(s, rm);
                        }
                        if (i != 32) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
                            tcg_temp_free_i32(tmp2);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                    case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                        ARCH(6T2);
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        i = ((insn >> 16) & 0x1f) + 1;
                        if (shift + i > 32)
                            goto illegal_op;
                        if (i < 32) {
                            if (op1 & 0x20) {
                                tcg_gen_extract_i32(tmp, tmp, shift, i);
                            } else {
                                tcg_gen_sextract_i32(tmp, tmp, shift, i);
                            }
                        }
                        store_reg(s, rd, tmp);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                }
                break;
            }
        do_ldst:
            /* Check for undefined extension instructions
             * per the ARM Bible IE:
             * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
             */
            sh = (0xf << 20) | (0xf << 4);
            if (op1 == 0x7 && ((insn & sh) == sh)) {
                goto illegal_op;
            }
            /* load/store byte/word */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            tmp2 = load_reg(s, rn);
            if ((insn & 0x01200000) == 0x00200000) {
                /* ldrt/strt */
                i = get_a32_user_mem_index(s);
            } else {
                i = get_mem_index(s);
            }
            if (insn & (1 << 24))
                gen_add_data_offset(s, insn, tmp2);
            if (insn & (1 << 20)) {
                /* load */
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 22)) {
                    gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
                } else {
                    gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
                }
            } else {
                /* store */
                tmp = load_reg(s, rd);
                if (insn & (1 << 22)) {
                    gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
                } else {
                    gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
                }
                tcg_temp_free_i32(tmp);
            }
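            /*
             * Writeback: P (bit 24) selects pre- vs post-indexing and
             * W (bit 21) requests base writeback; the post-indexed forms
             * always write the updated base back to rn.
             */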
            if (!(insn & (1 << 24))) {
                gen_add_data_offset(s, insn, tmp2);
                store_reg(s, rn, tmp2);
            } else if (insn & (1 << 21)) {
                store_reg(s, rn, tmp2);
            } else {
                tcg_temp_free_i32(tmp2);
            }
            if (insn & (1 << 20)) {
                /* Complete the load. */
                store_reg_from_load(s, rd, tmp);
            }
            break;
        case 0x08:
        case 0x09:
            {
                int j, n, loaded_base;
                bool exc_return = false;
                bool is_load = extract32(insn, 20, 1);
                bool user = false;
                TCGv_i32 loaded_var;
                /* load/store multiple words */
                /* XXX: store correct base if write back */
                if (insn & (1 << 22)) {
                    /* LDM (user), LDM (exception return) and STM (user) */
                    if (IS_USER(s))
                        goto illegal_op; /* only usable in supervisor mode */

                    if (is_load && extract32(insn, 15, 1)) {
                        exc_return = true;
                    } else {
                        user = true;
                    }
                }
                rn = (insn >> 16) & 0xf;
                addr = load_reg(s, rn);

                /* compute total size */
                loaded_base = 0;
                loaded_var = NULL;
                n = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        n++;
                }
                /* XXX: test invalid n == 0 case ? */
                if (insn & (1 << 23)) {
                    if (insn & (1 << 24)) {
                        /* pre increment */
                        tcg_gen_addi_i32(addr, addr, 4);
                    } else {
                        /* post increment */
                    }
                } else {
                    if (insn & (1 << 24)) {
                        /* pre decrement */
                        tcg_gen_addi_i32(addr, addr, -(n * 4));
                    } else {
                        /* post decrement */
                        if (n != 1)
                            tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                    }
                }
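                /*
                 * For the decrementing forms the base was lowered up front,
                 * so the transfer loop below can always walk upwards in
                 * 4-byte steps; n is the number of registers in the list.
                 */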
                j = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i)) {
                        if (is_load) {
                            /* load */
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            if (user) {
                                tmp2 = tcg_const_i32(i);
                                gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                                tcg_temp_free_i32(tmp2);
                                tcg_temp_free_i32(tmp);
                            } else if (i == rn) {
                                loaded_var = tmp;
                                loaded_base = 1;
                            } else if (i == 15 && exc_return) {
                                store_pc_exc_ret(s, tmp);
                            } else {
                                store_reg_from_load(s, i, tmp);
                            }
                        } else {
                            /* store */
                            if (i == 15) {
                                tmp = tcg_temp_new_i32();
                                tcg_gen_movi_i32(tmp, read_pc(s));
                            } else if (user) {
                                tmp = tcg_temp_new_i32();
                                tmp2 = tcg_const_i32(i);
                                gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                                tcg_temp_free_i32(tmp2);
                            } else {
                                tmp = load_reg(s, i);
                            }
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        j++;
                        /* no need to add after the last transfer */
                        if (j != n)
                            tcg_gen_addi_i32(addr, addr, 4);
                    }
                }
                if (insn & (1 << 21)) {
                    /* write back */
                    if (insn & (1 << 23)) {
                        if (insn & (1 << 24)) {
                            /* pre increment */
                        } else {
                            /* post increment */
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                    } else {
                        if (insn & (1 << 24)) {
                            /* pre decrement */
                            if (n != 1)
                                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                        } else {
                            /* post decrement */
                            tcg_gen_addi_i32(addr, addr, -(n * 4));
                        }
                    }
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if (exc_return) {
                    /* Restore CPSR from SPSR. */
                    tmp = load_cpu_field(spsr);
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_start();
                    }
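                    /*
                     * Under icount the CPSR write for exception return is
                     * bracketed with gen_io_start()/gen_io_end(), like
                     * other operations with I/O-visible side effects.
                     */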
                    gen_helper_cpsr_write_eret(cpu_env, tmp);
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_end();
                    }
                    tcg_temp_free_i32(tmp);
                    /* Must exit loop to check un-masked IRQs */
                    s->base.is_jmp = DISAS_EXIT;
                }
            }
            break;
        case 0xa:
        case 0xb:
            {
                int32_t offset;

                /* branch (and link) */
                if (insn & (1 << 24)) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, s->pc);
                    store_reg(s, 14, tmp);
                }
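                /*
                 * The 24-bit immediate is shifted left by two and
                 * sign-extended (sextract32 of the low 26 bits of
                 * insn << 2), giving a byte offset relative to the
                 * architectural PC of the branch (insn address + 8).
                 */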
                offset = sextract32(insn << 2, 0, 26);
                gen_jmp(s, read_pc(s) + offset);
            }
            break;
        case 0xc:
        case 0xd:
        case 0xe:
            if (((insn >> 8) & 0xe) == 10) {
                /* VFP. */
                if (disas_vfp_insn(s, insn)) {
                    goto illegal_op;
                }
            } else if (disas_coproc_insn(s, insn)) {
                /* Coprocessor. */
                goto illegal_op;
            }
            break;
        case 0xf:
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 24);
            s->base.is_jmp = DISAS_SWI;
            break;
        default:
        illegal_op:
            gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                               default_exception_el(s));
            break;
        }
    }
}

static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
{
    /*
     * Return true if this is a 16 bit instruction. We must be precise
     * about this (matching the decode).
     */
    if ((insn >> 11) < 0x1d) {
        /* Definitely a 16-bit instruction */
        return true;
    }

    /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
     * first half of a 32-bit Thumb insn. Thumb-1 cores might
     * end up actually treating this as two 16-bit insns, though,
     * if it's half of a bl/blx pair that might span a page boundary.
     */
    if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Thumb2 cores (including all M profile ones) always treat
         * 32-bit insns as 32-bit.
         */
        return false;
    }

    if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
        /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
         * is not on the next page; we merge this into a 32-bit
         * insn.
         */
        return false;
    }
    /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
     * 0b1111_1xxx_xxxx_xxxx : BL suffix;
     * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
     * -- handle as single 16 bit insn
     */
    return true;
}

/* Return true if this is a Thumb-2 logical op. */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid. */

static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
{
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        tcg_gen_and_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 1: /* bic */
        tcg_gen_andc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 2: /* orr */
        tcg_gen_or_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 3: /* orn */
        tcg_gen_orc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 4: /* eor */
        tcg_gen_xor_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 8: /* add */
        if (conds)
            gen_add_CC(t0, t0, t1);
        else
            tcg_gen_add_i32(t0, t0, t1);
        break;
    case 10: /* adc */
        if (conds)
            gen_adc_CC(t0, t0, t1);
        else
            gen_adc(t0, t1);
        break;
    case 11: /* sbc */
        if (conds) {
            gen_sbc_CC(t0, t0, t1);
        } else {
            gen_sub_carry(t0, t0, t1);
        }
        break;
    case 13: /* sub */
        if (conds)
            gen_sub_CC(t0, t0, t1);
        else
            tcg_gen_sub_i32(t0, t0, t1);
        break;
    case 14: /* rsb */
        if (conds)
            gen_sub_CC(t0, t1, t0);
        else
            tcg_gen_sub_i32(t0, t1, t0);
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        return 1;
    }
    if (logic_cc) {
        gen_logic_CC(t0);
        if (shifter_out)
            gen_set_CF_bit31(t1);
    }
    return 0;
}

/* Translate a 32-bit thumb instruction. */
static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
{
    uint32_t imm, shift, offset;
    uint32_t rd, rn, rm, rs;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 tmp3;
    TCGv_i32 addr;
    TCGv_i64 tmp64;
    int op;
    int shiftop;
    int conds;
    int logic_cc;

    /*
     * ARMv6-M supports a limited subset of Thumb2 instructions.
     * Other Thumb1 architectures allow only 32-bit
     * combined BL/BLX prefix and suffix.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_V7)) {
        int i;
        bool found = false;
        static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
                                               0xf3b08040 /* dsb */,
                                               0xf3b08050 /* dmb */,
                                               0xf3b08060 /* isb */,
                                               0xf3e08000 /* mrs */,
                                               0xf000d000 /* bl */};
        static const uint32_t armv6m_mask[] = {0xffe0d000,
                                               0xfff0d0f0,
                                               0xfff0d0f0,
                                               0xfff0d0f0,
                                               0xffe0d000,
                                               0xf800d000};
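        /*
         * A candidate is accepted when (insn & mask) == pattern, i.e.
         * the don't-care operand bits are masked off before comparing
         * against the fixed encoding bits of each whitelisted insn.
         */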

        for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
            if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
                found = true;
                break;
            }
        }
        if (!found) {
            goto illegal_op;
        }
    } else if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }

    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    rm = insn & 0xf;
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions. Should never happen. */
        abort();
    case 4:
        if (insn & (1 << 22)) {
            /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
             * - load/store doubleword, load/store exclusive, ldacq/strel,
             *   table branch, TT.
             */
            if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
                arm_dc_feature(s, ARM_FEATURE_V8)) {
                /* 0b1110_1001_0111_1111_1110_1001_0111_1111
                 * - SG (v8M only)
                 * The bulk of the behaviour for this instruction is implemented
                 * in v7m_handle_execute_nsc(), which deals with the insn when
                 * it is executed by a CPU in non-secure state from memory
                 * which is Secure & NonSecure-Callable.
                 * Here we only need to handle the remaining cases:
                 * * in NS memory (including the "security extension not
                 *   implemented" case) : NOP
                 * * in S memory but CPU already secure (clear IT bits)
                 * We know that the attribute for the memory this insn is
                 * in must match the current CPU state, because otherwise
                 * get_phys_addr_pmsav8 would have generated an exception.
                 */
                if (s->v8m_secure) {
                    /* Like the IT insn, we don't need to generate any code */
                    s->condexec_cond = 0;
                    s->condexec_mask = 0;
                }
            } else if (insn & 0x01200000) {
                /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
                 *  - load/store dual (post-indexed)
                 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
                 *  - load/store dual (literal and immediate)
                 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
                 *  - load/store dual (pre-indexed)
                 */
                bool wback = extract32(insn, 21, 1);

                if (rn == 15 && (insn & (1 << 21))) {
                    /* UNPREDICTABLE */
                    goto illegal_op;
                }

                addr = add_reg_for_lit(s, rn, 0);
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0) {
                    offset = -offset;
                }

                if (s->v8m_stackcheck && rn == 13 && wback) {
                    /*
                     * Here 'addr' is the current SP; if offset is +ve we're
                     * moving SP up, else down. It is UNKNOWN whether the limit
                     * check triggers when SP starts below the limit and ends
                     * up above it; check whichever of the current and final
                     * SP is lower, so QEMU will trigger in that situation.
                     */
                    if ((int32_t)offset < 0) {
                        TCGv_i32 newsp = tcg_temp_new_i32();

                        tcg_gen_addi_i32(newsp, addr, offset);
                        gen_helper_v8m_stackcheck(cpu_env, newsp);
                        tcg_temp_free_i32(newsp);
                    } else {
                        gen_helper_v8m_stackcheck(cpu_env, addr);
                    }
                }

                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, offset);
                    offset = 0;
                }
                if (insn & (1 << 20)) {
                    /* ldrd */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    store_reg(s, rd, tmp);
                } else {
                    /* strd */
                    tmp = load_reg(s, rs);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                if (wback) {
                    /* Base writeback. */
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            } else if ((insn & (1 << 23)) == 0) {
                /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
                 * - load/store exclusive word
                 * - TT (v8M only)
                 */
                if (rs == 15) {
                    if (!(insn & (1 << 20)) &&
                        arm_dc_feature(s, ARM_FEATURE_M) &&
                        arm_dc_feature(s, ARM_FEATURE_V8)) {
                        /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
                         *  - TT (v8M only)
                         */
                        bool alt = insn & (1 << 7);
                        TCGv_i32 addr, op, ttresp;

                        if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
                            /* we UNDEF for these UNPREDICTABLE cases */
                            goto illegal_op;
                        }

                        if (alt && !s->v8m_secure) {
                            goto illegal_op;
                        }

                        addr = load_reg(s, rn);
                        op = tcg_const_i32(extract32(insn, 6, 2));
                        ttresp = tcg_temp_new_i32();
                        gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
                        tcg_temp_free_i32(addr);
                        tcg_temp_free_i32(op);
                        store_reg(s, rd, ttresp);
                        break;
                    }
                    goto illegal_op;
                }
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                } else {
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                }
                tcg_temp_free_i32(addr);
            } else if ((insn & (7 << 5)) == 0) {
                /* Table Branch. */
                addr = load_reg(s, rn);
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    /* tbh */
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                } else { /* tbb */
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                }
                tcg_temp_free_i32(addr);
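                /*
                 * Table entries are halfword counts (and for TBH the
                 * index was already doubled above), so scale the loaded
                 * entry and add it to the PC of this insn + 4.
                 */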
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, read_pc(s));
                store_reg(s, 15, tmp);
            } else {
                bool is_lasr = false;
                bool is_ld = extract32(insn, 20, 1);
                int op2 = (insn >> 6) & 0x3;
                op = (insn >> 4) & 0x3;
                switch (op2) {
                case 0:
                    goto illegal_op;
                case 1:
                    /* Load/store exclusive byte/halfword/doubleword */
                    if (op == 2) {
                        goto illegal_op;
                    }
                    ARCH(7);
                    break;
                case 2:
                    /* Load-acquire/store-release */
                    if (op == 3) {
                        goto illegal_op;
                    }
                    /* Fall through */
                case 3:
                    /* Load-acquire/store-release exclusive */
                    ARCH(8);
                    is_lasr = true;
                    break;
                }

                if (is_lasr && !is_ld) {
                    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
                }
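                /*
                 * Release stores need the barrier before the access and
                 * acquire loads need it after (emitted further down), so
                 * the access itself is ordered against surrounding code.
                 */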

                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                if (!(op2 & 1)) {
                    if (is_ld) {
                        tmp = tcg_temp_new_i32();
                        switch (op) {
                        case 0: /* ldab */
                            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
                                              rs | ISSIsAcqRel);
                            break;
                        case 1: /* ldah */
                            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
                                               rs | ISSIsAcqRel);
                            break;
                        case 2: /* lda */
                            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
                                               rs | ISSIsAcqRel);
                            break;
                        default:
                            abort();
                        }
                        store_reg(s, rs, tmp);
                    } else {
                        tmp = load_reg(s, rs);
                        switch (op) {
                        case 0: /* stlb */
                            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
                                             rs | ISSIsAcqRel);
                            break;
                        case 1: /* stlh */
                            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
                                              rs | ISSIsAcqRel);
                            break;
                        case 2: /* stl */
                            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
                                              rs | ISSIsAcqRel);
                            break;
                        default:
                            abort();
                        }
                        tcg_temp_free_i32(tmp);
                    }
                } else if (is_ld) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                } else {
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                }
                tcg_temp_free_i32(addr);

                if (is_lasr && is_ld) {
                    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
                }
            }
        } else {
            /* Load/store multiple, RFE, SRS. */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* RFE, SRS: not available in user mode or on M profile */
                if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
                    goto illegal_op;
                }
                if (insn & (1 << 20)) {
                    /* rfe */
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2. */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
                    if (insn & (1 << 21)) {
                        /* Base writeback. */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, -4);
                        }
                        store_reg(s, rn, addr);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                    gen_rfe(s, tmp, tmp2);
                } else {
                    /* srs */
                    gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
                            insn & (1 << 21));
                }
            } else {
                int i, loaded_base = 0;
                TCGv_i32 loaded_var;
                bool wback = extract32(insn, 21, 1);
                /* Load/store multiple. */
                addr = load_reg(s, rn);
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }

                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                }

                if (s->v8m_stackcheck && rn == 13 && wback) {
                    /*
                     * If the writeback is incrementing SP rather than
                     * decrementing it, and the initial SP is below the
                     * stack limit but the final written-back SP would
                     * be above, then we must not perform any memory
                     * accesses, but it is IMPDEF whether we generate
                     * an exception. We choose to do so in this case.
                     * At this point 'addr' is the lowest address, so
                     * either the original SP (if incrementing) or our
                     * final SP (if decrementing), so that's what we check.
                     */
                    gen_helper_v8m_stackcheck(cpu_env, addr);
                }

                loaded_var = NULL;
                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                        continue;
                    if (insn & (1 << 20)) {
                        /* Load. */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        if (i == 15) {
                            gen_bx_excret(s, tmp);
                        } else if (i == rn) {
                            loaded_var = tmp;
                            loaded_base = 1;
                        } else {
                            store_reg(s, i, tmp);
                        }
                    } else {
                        /* Store. */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    tcg_gen_addi_i32(addr, addr, 4);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if (wback) {
                    /* Base register writeback. */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    }
                    /* Fault if writeback register is in register list. */
                    if (insn & (1 << rn))
                        goto illegal_op;
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    case 5:

        op = (insn >> 21) & 0xf;
        if (op == 6) {
            if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                goto illegal_op;
            }
            /* Halfword pack. */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
            if (insn & (1 << 5)) {
                /* pkhtb */
                if (shift == 0)
                    shift = 31;
                tcg_gen_sari_i32(tmp2, tmp2, shift);
                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                tcg_gen_ext16u_i32(tmp2, tmp2);
            } else {
                /* pkhbt */
                if (shift)
                    tcg_gen_shli_i32(tmp2, tmp2, shift);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
            }
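            /*
             * PKHBT/PKHTB combine the bottom halfword of one operand with
             * the top halfword of the (shifted) other; the masking above
             * selects the surviving half from each side before the OR
             * below merges them.
             */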
            tcg_gen_or_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* Data processing register constant shift. */
            if (rn == 15) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            } else {
                tmp = load_reg(s, rn);
            }
            tmp2 = load_reg(s, rm);

            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
                goto illegal_op;
            tcg_temp_free_i32(tmp2);
            if (rd == 13 &&
                ((op == 2 && rn == 15) ||
                 (op == 8 && rn == 13) ||
                 (op == 13 && rn == 13))) {
                /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
                store_sp_checked(s, tmp);
            } else if (rd != 15) {
                store_reg(s, rd, tmp);
            } else {
                tcg_temp_free_i32(tmp);
            }
        }
        break;
    case 13: /* Misc data processing. */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift. */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            /*
             * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
             *  - MOV, MOVS (register-shifted register), flagsetting
             */
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            if (logic_cc)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
            break;
        case 1: /* Sign/zero extend. */
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: /* SXTAH, SXTH */
            case 1: /* UXTAH, UXTH */
            case 4: /* SXTAB, SXTB */
            case 5: /* UXTAB, UXTB */
                break;
            case 2: /* SXTAB16, SXTB16 */
            case 3: /* UXTAB16, UXTB16 */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
                break;
            default:
                goto illegal_op;
            }
            if (rn != 15) {
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
            }
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient. */
            if (shift != 0)
                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default:
                g_assert_not_reached();
            }
            if (rn != 15) {
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 2: /* SIMD add/subtract. */
            if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                goto illegal_op;
            }
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 3: /* Other data processing. */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            if (op < 4) {
                /* Saturating add/subtract. */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if (op & 1)
                    gen_helper_double_saturate(tmp, cpu_env, tmp);
                if (op & 2)
                    gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
                else
                    gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                switch (op) {
                case 0x0a: /* rbit */
                case 0x08: /* rev */
                case 0x09: /* rev16 */
                case 0x0b: /* revsh */
                case 0x18: /* clz */
                    break;
                case 0x10: /* sel */
                    if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                        goto illegal_op;
                    }
                    break;
                case 0x20: /* crc32/crc32c */
                case 0x21:
                case 0x22:
                case 0x28:
                case 0x29:
                case 0x2a:
                    if (!dc_isar_feature(aa32_crc32, s)) {
                        goto illegal_op;
                    }
                    break;
                default:
                    goto illegal_op;
                }
                tmp = load_reg(s, rn);
                switch (op) {
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                    break;
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                    break;
                case 0x09: /* rev16 */
                    gen_rev16(tmp);
                    break;
                case 0x0b: /* revsh */
                    gen_revsh(tmp);
                    break;
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    break;
                case 0x18: /* clz */
                    tcg_gen_clzi_i32(tmp, tmp, 32);
                    break;
                case 0x20:
                case 0x21:
                case 0x22:
                case 0x28:
                case 0x29:
                case 0x2a:
                {
                    /* crc32/crc32c */
                    uint32_t sz = op & 0x3;
                    uint32_t c = op & 0x8;

                    tmp2 = load_reg(s, rm);
                    if (sz == 0) {
                        tcg_gen_andi_i32(tmp2, tmp2, 0xff);
                    } else if (sz == 1) {
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
                    }
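                    /*
                     * The operand was masked down to the byte or halfword
                     * actually consumed; tmp3 below carries the operand
                     * size in bytes to the crc32 helpers.
                     */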
                    tmp3 = tcg_const_i32(1 << sz);
                    if (c) {
                        gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
                    } else {
                        gen_helper_crc32(tmp, tmp, tmp2, tmp3);
                    }
                    tcg_temp_free_i32(tmp2);
                    tcg_temp_free_i32(tmp3);
                    break;
                }
                default:
                    g_assert_not_reached();
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
            case 7: /* Unsigned sum of absolute differences. */
                break;
            case 1: /* 16 x 16 -> 32 */
            case 2: /* Dual multiply add. */
            case 3: /* 32 * 16 -> 32msb */
            case 4: /* Dual multiply subtract. */
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
                break;
            }
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (op)
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    else
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 2: /* Dual multiply add. */
            case 4: /* Dual multiply subtract. */
                if (op)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (insn & (1 << 22)) {
                    /* This subtraction cannot overflow. */
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 3: /* 32 * 16 -> 32msb */
                if (op)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                if (rs != 15) {
                    tmp = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    } else {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                    }
                }
                if (insn & (1 << 4)) {
                    tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                }
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                break;
            case 7: /* Unsigned sum of absolute differences. */
                gen_helper_usad8(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: case 7: /* 64-bit multiply, Divide. */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!dc_isar_feature(thumb_div, s)) {
                    goto illegal_op;
                }
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long. */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    tcg_temp_free_i32(tmp);
                    tcg_temp_free_i32(tmp2);
                    goto illegal_op;
                }
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                /* BUGFIX */
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_i32(tmp);
                            goto illegal_op;
                        }
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        tcg_temp_free_i32(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* Signed 64-bit multiply */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                        tcg_temp_free_i64(tmp64);
                        goto illegal_op;
                    }
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate. */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            }
            break;
        }
        break;
    case 6: case 7: case 14: case 15:
        /* Coprocessor. */
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
            if (extract32(insn, 24, 2) == 3) {
                goto illegal_op; /* op0 = 0b11 : unallocated */
            }

            /*
             * Decode VLLDM and VLSTM first: these are nonstandard because:
             *  * if there is no FPU then these insns must NOP in
             *    Secure state and UNDEF in Nonsecure state
             *  * if there is an FPU then these insns do not have
             *    the usual behaviour that disas_vfp_insn() provides of
             *    being controlled by CPACR/NSACR enable bits or the
             *    lazy-stacking logic.
             */
            if (arm_dc_feature(s, ARM_FEATURE_V8) &&
                (insn & 0xffa00f00) == 0xec200a00) {
                /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
                 * - VLLDM, VLSTM
                 * We choose to UNDEF if the RAZ bits are non-zero.
                 */
                if (!s->v8m_secure || (insn & 0x0040f0ff)) {
                    goto illegal_op;
                }

                if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
                    TCGv_i32 fptr = load_reg(s, rn);

                    if (extract32(insn, 20, 1)) {
                        gen_helper_v7m_vlldm(cpu_env, fptr);
                    } else {
                        gen_helper_v7m_vlstm(cpu_env, fptr);
                    }
                    tcg_temp_free_i32(fptr);

                    /* End the TB, because we have updated FP control bits */
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
            }
            if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
                ((insn >> 8) & 0xe) == 10) {
                /* FP, and the CPU supports it */
                if (disas_vfp_insn(s, insn)) {
                    goto illegal_op;
                }
                break;
            }

            /* All other insns: NOCP */
            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
                               default_exception_el(s));
            break;
        }
        if ((insn & 0xfe000a00) == 0xfc000800
            && arm_dc_feature(s, ARM_FEATURE_V8)) {
            /* The Thumb2 and ARM encodings are identical. */
            if (disas_neon_insn_3same_ext(s, insn)) {
                goto illegal_op;
            }
        } else if ((insn & 0xff000a00) == 0xfe000800
                   && arm_dc_feature(s, ARM_FEATURE_V8)) {
            /* The Thumb2 and ARM encodings are identical. */
            if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
                goto illegal_op;
            }
        } else if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding. */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(s, insn)) {
                goto illegal_op;
            }
        } else if (((insn >> 8) & 0xe) == 10) {
            if (disas_vfp_insn(s, insn)) {
                goto illegal_op;
            }
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn(s, insn)) {
                goto illegal_op;
            }
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control. */
            if (insn & 0x5000) {
                /* Unconditional branch. */
                /* signextend(hw1[10:0]) -> offset[:12]. */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1]. */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above. */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
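                /*
                 * The two XORs above recover I1/I2 from J1/J2
                 * (I = NOT(J XOR S)), as specified for the T2 BL/BLX
                 * immediate encoding.
                 */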

                if (insn & (1 << 14)) {
                    /* Branch and link. */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += read_pc(s);
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    if (arm_dc_feature(s, ARM_FEATURE_M)) {
                        goto illegal_op;
                    }
                    if (!(insn & (1 << 20))) {
                        /* Hypervisor call (v7) */
                        int imm16 = extract32(insn, 16, 4) << 12
                            | extract32(insn, 0, 12);
                        ARCH(7);
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        gen_hvc(s, imm16);
                    } else {
                        /* Secure monitor call (v6+) */
                        ARCH(6K);
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        gen_smc(s);
                    }
                } else {
                    op = (insn >> 20) & 7;
                    switch (op) {
                    case 0: /* msr cpsr. */
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            tmp = load_reg(s, rn);
                            /* the constant is the mask and SYSm fields */
                            addr = tcg_const_i32(insn & 0xfff);
                            gen_helper_v7m_msr(cpu_env, addr, tmp);
                            tcg_temp_free_i32(addr);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        }
                        /* fall through */
                    case 1: /* msr spsr. */
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }

                        if (extract32(insn, 5, 1)) {
                            /* MSR (banked) */
                            int sysm = extract32(insn, 8, 4) |
                                (extract32(insn, 4, 1) << 4);
                            int r = op & 1;

                            gen_msr_banked(s, r, sysm, rm);
                            break;
                        }

                        /* MSR (for PSRs) */
                        tmp = load_reg(s, rn);
                        if (gen_set_psr(s,
                                        msr_mask(s, (insn >> 8) & 0xf, op == 1),
                                        op == 1, tmp))
                            goto illegal_op;
                        break;
                    case 2: /* cps, nop-hint. */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                        }
                        /* Implemented as NOP in user mode. */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_set_psr_im(s, offset, 0, imm);
                        }
                        break;
                    case 3: /* Special control operations. */
                        if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
                            !arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }
                        op = (insn >> 4) & 0xf;
                        switch (op) {
                        case 2: /* clrex */
                            gen_clrex(s);
                            break;
                        case 4: /* dsb */
                        case 5: /* dmb */
                            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
                            break;
                        case 6: /* isb */
                            /* We need to break the TB after this insn
                             * to execute self-modifying code correctly
                             * and also to take any pending interrupts
                             * immediately.
                             */
                            gen_goto_tb(s, 0, s->pc);
                            break;
                        case 7: /* sb */
                            if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
                                goto illegal_op;
                            }
                            /*
                             * TODO: There is no speculation barrier opcode
                             * for TCG; MB and end the TB instead.
                             */
                            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
                            gen_goto_tb(s, 0, s->pc);
                            break;
                        default:
                            goto illegal_op;
                        }
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.
                         * This instruction doesn't exist at all for M-profile.
                         */
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rn);
                        gen_bx(s, tmp);
                        break;
                    case 5: /* Exception return. */
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        if (rn != 14 || rd != 15) {
                            goto illegal_op;
                        }
                        if (s->current_el == 2) {
                            /* ERET from Hyp uses ELR_Hyp, not LR */
                            if (insn & 0xff) {
                                goto illegal_op;
                            }
                            tmp = load_cpu_field(elr_el[2]);
                        } else {
                            tmp = load_reg(s, rn);
                            tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                        }
                        gen_exception_return(s, tmp);
                        break;
                    case 6: /* MRS */
                        if (extract32(insn, 5, 1) &&
                            !arm_dc_feature(s, ARM_FEATURE_M)) {
                            /* MRS (banked) */
                            int sysm = extract32(insn, 16, 4) |
                                (extract32(insn, 4, 1) << 4);

                            gen_mrs_banked(s, 0, sysm, rd);
                            break;
                        }

                        if (extract32(insn, 16, 4) != 0xf) {
                            goto illegal_op;
                        }
                        if (!arm_dc_feature(s, ARM_FEATURE_M) &&
                            extract32(insn, 0, 8) != 0) {
                            goto illegal_op;
                        }

                        /* mrs cpsr */
                        tmp = tcg_temp_new_i32();
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_mrs(tmp, cpu_env, addr);
                            tcg_temp_free_i32(addr);
                        } else {
                            gen_helper_cpsr_read(tmp, cpu_env);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 7: /* MRS */
                        if (extract32(insn, 5, 1) &&
                            !arm_dc_feature(s, ARM_FEATURE_M)) {
                            /* MRS (banked) */
                            int sysm = extract32(insn, 16, 4) |
                                (extract32(insn, 4, 1) << 4);

                            gen_mrs_banked(s, 1, sysm, rd);
                            break;
                        }

                        /* mrs spsr. */
                        /* Not accessible in user mode. */
                        if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }

                        if (extract32(insn, 16, 4) != 0xf ||
                            extract32(insn, 0, 8) != 0) {
                            goto illegal_op;
                        }

                        tmp = load_cpu_field(spsr);
                        store_reg(s, rd, tmp);
                        break;
                    }
                }
            } else {
                /* Conditional branch. */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction. */
                arm_skip_unless(s, op);

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16]. */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26]. */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13]. */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11]. */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, read_pc(s) + offset);
            }
        } else {
            /*
             * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
             *  - Data-processing (modified immediate, plain binary immediate)
             */
            if (insn & (1 << 25)) {
                /*
                 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
                 *  - Data-processing (plain binary immediate)
                 */
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate. */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract. */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32) {
                            tcg_gen_sextract_i32(tmp, tmp, shift, imm);
                        }
                        break;
                    case 6: /* Unsigned bitfield extract. */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32) {
                            tcg_gen_extract_i32(tmp, tmp, shift, imm);
                        }
                        break;
                    case 3: /* Bitfield insert/clear. */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
                            tcg_temp_free_i32(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate. */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned. */
                            if ((op & 1) && shift == 0) {
                                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                                    tcg_temp_free_i32(tmp);
                                    tcg_temp_free_i32(tmp2);
                                    goto illegal_op;
                                }
                                gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                            }
                        } else {
                            /* Signed. */
                            if ((op & 1) && shift == 0) {
                                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                                    tcg_temp_free_i32(tmp);
                                    tcg_temp_free_i32(tmp2);
                                    goto illegal_op;
                                }
                                gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                            }
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                        | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate. */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                        store_reg(s, rd, tmp);
                    } else {
                        /* Add/sub 12-bit immediate. */
                        if (insn & (1 << 23)) {
                            imm = -imm;
                        }
                        tmp = add_reg_for_lit(s, rn, imm);
                        if (rn == 13 && rd == 13) {
                            /* ADD SP, SP, imm or SUB SP, SP, imm */
                            store_sp_checked(s, tmp);
                        } else {
                            store_reg(s, rd, tmp);
                        }
                    }
                }
            } else {
                /*
                 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
                 *  - Data-processing (modified immediate)
                 */
                int shifter_out = 0;
                /* modified 12-bit immediate. */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do. */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm |= imm << 16;
                    imm <<= 8;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant. */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
7d1b0095 10726 tmp2 = tcg_temp_new_i32();
3174f8e9 10727 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10728 rn = (insn >> 16) & 0xf;
3174f8e9 10729 if (rn == 15) {
7d1b0095 10730 tmp = tcg_temp_new_i32();
3174f8e9
FN
10731 tcg_gen_movi_i32(tmp, 0);
10732 } else {
10733 tmp = load_reg(s, rn);
10734 }
9ee6e8bb
PB
10735 op = (insn >> 21) & 0xf;
10736 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10737 shifter_out, tmp, tmp2))
9ee6e8bb 10738 goto illegal_op;
7d1b0095 10739 tcg_temp_free_i32(tmp2);
9ee6e8bb 10740 rd = (insn >> 8) & 0xf;
55203189
PM
10741 if (rd == 13 && rn == 13
10742 && (op == 8 || op == 13)) {
10743 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
10744 store_sp_checked(s, tmp);
10745 } else if (rd != 15) {
3174f8e9
FN
10746 store_reg(s, rd, tmp);
10747 } else {
7d1b0095 10748 tcg_temp_free_i32(tmp);
2c0262af 10749 }
2c0262af 10750 }
9ee6e8bb
PB
10751 }
10752 break;
10753 case 12: /* Load/store single data item. */
10754 {
10755 int postinc = 0;
10756 int writeback = 0;
a99caa48 10757 int memidx;
9bb6558a
PM
10758 ISSInfo issinfo;
10759
9ee6e8bb 10760 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10761 if (disas_neon_ls_insn(s, insn)) {
c1713132 10762 goto illegal_op;
7dcc1f89 10763 }
9ee6e8bb
PB
10764 break;
10765 }
a2fdc890
PM
10766 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10767 if (rs == 15) {
10768 if (!(insn & (1 << 20))) {
10769 goto illegal_op;
10770 }
10771 if (op != 2) {
10772 /* Byte or halfword load space with dest == r15 : memory hints.
10773 * Catch them early so we don't emit pointless addressing code.
10774 * This space is a mix of:
10775 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10776 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10777 * cores)
10778 * unallocated hints, which must be treated as NOPs
10779 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10780 * which is easiest for the decoding logic
10781 * Some space which must UNDEF
10782 */
10783 int op1 = (insn >> 23) & 3;
10784 int op2 = (insn >> 6) & 0x3f;
10785 if (op & 2) {
10786 goto illegal_op;
10787 }
10788 if (rn == 15) {
02afbf64
PM
10789 /* UNPREDICTABLE, unallocated hint or
10790 * PLD/PLDW/PLI (literal)
10791 */
2eea841c 10792 return;
a2fdc890
PM
10793 }
10794 if (op1 & 1) {
2eea841c 10795 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10796 }
10797 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 10798 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10799 }
10800 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 10801 goto illegal_op;
a2fdc890
PM
10802 }
10803 }
a99caa48 10804 memidx = get_mem_index(s);
16e0d823
RH
10805 imm = insn & 0xfff;
10806 if (insn & (1 << 23)) {
10807 /* PC relative or Positive offset. */
10808 addr = add_reg_for_lit(s, rn, imm);
10809 } else if (rn == 15) {
10810 /* PC relative with negative offset. */
10811 addr = add_reg_for_lit(s, rn, -imm);
9ee6e8bb 10812 } else {
b0109805 10813 addr = load_reg(s, rn);
16e0d823
RH
10814 imm = insn & 0xff;
10815 switch ((insn >> 8) & 0xf) {
10816 case 0x0: /* Shifted Register. */
10817 shift = (insn >> 4) & 0xf;
10818 if (shift > 3) {
2a0308c5 10819 tcg_temp_free_i32(addr);
b7bcbe95 10820 goto illegal_op;
9ee6e8bb 10821 }
16e0d823
RH
10822 tmp = load_reg(s, rm);
10823 if (shift) {
10824 tcg_gen_shli_i32(tmp, tmp, shift);
10825 }
10826 tcg_gen_add_i32(addr, addr, tmp);
10827 tcg_temp_free_i32(tmp);
10828 break;
10829 case 0xc: /* Negative offset. */
10830 tcg_gen_addi_i32(addr, addr, -imm);
10831 break;
10832 case 0xe: /* User privilege. */
10833 tcg_gen_addi_i32(addr, addr, imm);
10834 memidx = get_a32_user_mem_index(s);
10835 break;
10836 case 0x9: /* Post-decrement. */
10837 imm = -imm;
10838 /* Fall through. */
10839 case 0xb: /* Post-increment. */
10840 postinc = 1;
10841 writeback = 1;
10842 break;
10843 case 0xd: /* Pre-decrement. */
10844 imm = -imm;
10845 /* Fall through. */
10846 case 0xf: /* Pre-increment. */
10847 writeback = 1;
10848 break;
10849 default:
10850 tcg_temp_free_i32(addr);
10851 goto illegal_op;
9ee6e8bb
PB
10852 }
10853 }
9bb6558a
PM
10854
10855 issinfo = writeback ? ISSInvalid : rs;
10856
0bc003ba
PM
10857 if (s->v8m_stackcheck && rn == 13 && writeback) {
10858 /*
10859 * Stackcheck. Here we know 'addr' is the current SP;
10860 * if imm is +ve we're moving SP up, else down. It is
10861 * UNKNOWN whether the limit check triggers when SP starts
10862 * below the limit and ends up above it; we chose to do so.
10863 */
10864 if ((int32_t)imm < 0) {
10865 TCGv_i32 newsp = tcg_temp_new_i32();
10866
10867 tcg_gen_addi_i32(newsp, addr, imm);
10868 gen_helper_v8m_stackcheck(cpu_env, newsp);
10869 tcg_temp_free_i32(newsp);
10870 } else {
10871 gen_helper_v8m_stackcheck(cpu_env, addr);
10872 }
10873 }
10874
10875 if (writeback && !postinc) {
10876 tcg_gen_addi_i32(addr, addr, imm);
10877 }
10878
9ee6e8bb
PB
10879 if (insn & (1 << 20)) {
10880 /* Load. */
5a839c0d 10881 tmp = tcg_temp_new_i32();
a2fdc890 10882 switch (op) {
5a839c0d 10883 case 0:
9bb6558a 10884 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10885 break;
10886 case 4:
9bb6558a 10887 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10888 break;
10889 case 1:
9bb6558a 10890 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10891 break;
10892 case 5:
9bb6558a 10893 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10894 break;
10895 case 2:
9bb6558a 10896 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10897 break;
2a0308c5 10898 default:
5a839c0d 10899 tcg_temp_free_i32(tmp);
2a0308c5
PM
10900 tcg_temp_free_i32(addr);
10901 goto illegal_op;
a2fdc890
PM
10902 }
10903 if (rs == 15) {
3bb8a96f 10904 gen_bx_excret(s, tmp);
9ee6e8bb 10905 } else {
a2fdc890 10906 store_reg(s, rs, tmp);
9ee6e8bb
PB
10907 }
10908 } else {
10909 /* Store. */
b0109805 10910 tmp = load_reg(s, rs);
9ee6e8bb 10911 switch (op) {
5a839c0d 10912 case 0:
9bb6558a 10913 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10914 break;
10915 case 1:
9bb6558a 10916 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10917 break;
10918 case 2:
9bb6558a 10919 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10920 break;
2a0308c5 10921 default:
5a839c0d 10922 tcg_temp_free_i32(tmp);
2a0308c5
PM
10923 tcg_temp_free_i32(addr);
10924 goto illegal_op;
b7bcbe95 10925 }
5a839c0d 10926 tcg_temp_free_i32(tmp);
2c0262af 10927 }
9ee6e8bb 10928 if (postinc)
b0109805
PB
10929 tcg_gen_addi_i32(addr, addr, imm);
10930 if (writeback) {
10931 store_reg(s, rn, addr);
10932 } else {
7d1b0095 10933 tcg_temp_free_i32(addr);
b0109805 10934 }
9ee6e8bb
PB
10935 }
10936 break;
10937 default:
10938 goto illegal_op;
2c0262af 10939 }
2eea841c 10940 return;
9ee6e8bb 10941illegal_op:
2eea841c
PM
10942 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10943 default_exception_el(s));
2c0262af
FB
10944}
10945
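/*
 * Illustrative sketch of the T32 "modified immediate" expansion decoded by
 * the switch on 'shift' above: the 12-bit field i:imm3:imm8 either
 * replicates the byte XY across the word (top four bits 0-3) or rotates an
 * 8-bit constant with a forced top bit right by the 5-bit amount
 * imm12[11:7].  Rotated constants also set shifter_out above, so the
 * rotation's carry-out reaches the flags via gen_thumb2_data_op().
 * 't32_expand_imm' is a hypothetical name for illustration only; it is not
 * a helper in this file.
 */
static inline uint32_t t32_expand_imm(uint32_t imm12)
{
    uint32_t imm8 = imm12 & 0xff;

    switch ((imm12 >> 8) & 0xf) {
    case 0: /* 000000XY */
        return imm8;
    case 1: /* 00XY00XY */
        return imm8 | (imm8 << 16);
    case 2: /* XY00XY00 */
        return (imm8 << 8) | (imm8 << 24);
    case 3: /* XYXYXYXY */
        return imm8 * 0x01010101;
    default: {
        /* 1bcdefgh rotated right; the rotation is always >= 8 here */
        uint32_t unrotated = 0x80 | (imm12 & 0x7f);
        uint32_t rot = (imm12 >> 7) & 0x1f;
        return (unrotated >> rot) | (unrotated << (32 - rot));
    }
    }
}
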
static void disas_thumb_insn(DisasContext *s, uint32_t insn)
{
    uint32_t val, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 addr;

    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /*
             * 0b0001_1xxx_xxxx_xxxx
             *  - Add, subtract (three low registers)
             *  - Add, subtract (two low registers and immediate)
             */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /*
         * 0b001x_xxxx_xxxx_xxxx
         *  - Add, subtract, compare, move (one low register and immediate)
         */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            addr = add_reg_for_lit(s, 15, (insn & 0xff) * 4);
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
                               rd | ISSIs16Bit);
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* 0b0100_01xx_xxxx_xxxx
             * - data processing extended, branch and exchange
             */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rd == 13) {
                    /* ADD SP, SP, reg */
                    store_sp_checked(s, tmp);
                } else {
                    store_reg(s, rd, tmp);
                }
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                if (rd == 13) {
                    /* MOV SP, reg */
                    store_sp_checked(s, tmp);
                } else {
                    store_reg(s, rd, tmp);
                }
                break;
            case 3:
            {
                /* 0b0100_0111_xxxx_xxxx
                 * - branch [and link] exchange thumb register
                 */
                bool link = insn & (1 << 7);

                if (insn & 3) {
                    goto undef;
                }
                if (link) {
                    ARCH(5);
                }
                if ((insn & 4)) {
                    /* BXNS/BLXNS: only exists for v8M with the
                     * security extensions, and always UNDEF if NonSecure.
                     * We don't implement these in the user-only mode
                     * either (in theory you can use them from Secure User
                     * mode but they are too tied in to system emulation.)
                     */
                    if (!s->v8m_secure || IS_USER_ONLY) {
                        goto undef;
                    }
                    if (link) {
                        gen_blxns(s, rm);
                    } else {
                        gen_bxns(s, rm);
                    }
                    break;
                }
                /* BLX/BX */
                tmp = load_reg(s, rm);
                if (link) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                    gen_bx(s, tmp);
                } else {
                    /* Only BX works as exception-return, not BLX */
                    gen_bx_excret(s, tmp);
                }
                break;
            }
            }
            break;
        }

        /*
         * 0b0100_00xx_xxxx_xxxx
         *  - Data-processing (two low registers)
         */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            tmp = NULL;
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;

    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
        } else {
            tmp = tcg_temp_new_i32();
        }

        switch (op) {
        case 0: /* str */
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 1: /* strh */
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 2: /* strb */
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 3: /* ldrsb */
            gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 4: /* ldr */
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 5: /* ldrh */
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 6: /* ldrb */
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 7: /* ldrsh */
            gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        }
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
        } else {
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /*
         * 0b1010_xxxx_xxxx_xxxx
         *  - Add PC/SP (immediate)
         */
        rd = (insn >> 8) & 7;
        val = (insn & 0xff) * 4;
        tmp = add_reg_for_lit(s, insn & (1 << 11) ? 13 : 15, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /*
             * 0b1011_0000_xxxx_xxxx
             *  - ADD (SP plus immediate)
             *  - SUB (SP minus immediate)
             */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_sp_checked(s, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /*
             * 0b1011_x10x_xxxx_xxxx
             *  - push/pop
             */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }

            if (s->v8m_stackcheck) {
                /*
                 * Here 'addr' is the lower of "old SP" and "new SP";
                 * if this is a pop that starts below the limit and ends
                 * above it, it is UNKNOWN whether the limit check triggers;
                 * we choose to trigger.
                 */
                gen_helper_v8m_stackcheck(cpu_env, addr);
            }

            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            tmp = NULL;
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
            }
            break;

        case 1: case 3: case 9: case 11: /* cbz/cbnz */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            arm_gen_condlabel(s);
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            gen_jmp(s, read_pc(s) + offset);
            break;

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /*
             * IT (If-Then)
             *
             * Combinations of firstcond and mask which set up an 0b1111
             * condition are UNPREDICTABLE; we take the CONSTRAINED
             * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
             * i.e. both meaning "execute always".
             */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
            break;
        }

        case 0xa: /* rev, and hlt */
        {
            int op1 = extract32(insn, 6, 2);

            if (op1 == 2) {
                /* HLT */
                int imm6 = extract32(insn, 0, 6);

                gen_hlt(s, imm6);
                break;
            }

            /* Otherwise this is rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch (op1) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default:
                g_assert_not_reached();
            }
            store_reg(s, rd, tmp);
            break;
        }

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
                    gen_helper_setend(cpu_env);
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var = NULL;
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->base.is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        arm_skip_unless(s, cond);

        /* jump to the offset */
        val = read_pc(s);
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            /* thumb_insn_is_16bit() ensures we can't get here for
             * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
             * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
             */
            assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
            ARCH(5);
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        }
        /* unconditional branch */
        val = read_pc(s);
        offset = ((int32_t)insn << 21) >> 21;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 15:
        /* thumb_insn_is_16bit() ensures we can't get here for
         * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
         */
        assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));

        if (insn & (1 << 11)) {
            /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
        } else {
            /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
            uint32_t uoffset = ((int32_t)insn << 21) >> 9;

            tcg_gen_movi_i32(cpu_R[14], read_pc(s) + uoffset);
        }
        break;
    }
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

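/*
 * Illustrative sketch of how the split Thumb-1 BL/BLX pair handled in
 * cases 14 and 15 above composes its branch target: the prefix parks
 * PC + sign_extend(imm11 << 12) in LR, and the suffix adds its own
 * imm11 << 1 (a BLX suffix additionally clears the low bits for ARM
 * alignment).  'pc' is the prefix insn's PC value as read_pc() would
 * return it; 'thumb1_bl_target' is a hypothetical name, not a helper
 * in this file.
 */
static inline uint32_t thumb1_bl_target(uint32_t pc, uint32_t prefix,
                                        uint32_t suffix)
{
    uint32_t lr = pc + (((int32_t)prefix << 21) >> 9); /* upper offset */
    return lr + ((suffix & 0x7ff) << 1);               /* lower offset */
}
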
static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->pc might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * We know this is a Thumb insn, and our caller ensures we are
     * only called if dc->pc is less than 4 bytes from the page
     * boundary, so we cross the page if the first 16 bits indicate
     * that this is a 32 bit insn.
     */
    uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);

    return !thumb_insn_is_16bit(s, s->pc, insn);
}

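/*
 * The architectural length rule that insn_crosses_page() relies on, as a
 * standalone predicate: a halfword whose top five bits are 0b11101,
 * 0b11110 or 0b11111 is the first half of a 32-bit encoding; anything
 * else is a complete 16-bit insn.  (thumb_insn_is_16bit() additionally
 * special-cases Thumb-1 cores, where a split BL/BLX pair really is two
 * halfword insns.)  Hypothetical name, for illustration only.
 */
static inline bool thumb_first_hw_is_32bit(uint16_t hw)
{
    return (hw >> 11) >= 0x1d;
}
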
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = env_archcpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    uint32_t condexec, core_mmu_idx;

    dc->isar = &cpu->isar;
    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
    dc->condexec_mask = (condexec & 0xf) << 1;
    dc->condexec_cond = condexec >> 4;
    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
    dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
    dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
        dc->vec_stride = 0;
    } else {
        dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
        dc->c15_cpar = 0;
    }
    dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
    dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
    dc->v7m_new_fp_ctxt_needed =
        FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
    dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;
    if (!arm_feature(env, ARM_FEATURE_M)) {
        dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
    }

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1.  */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }

    cpu_V0 = tcg_temp_new_i64();
    cpu_V1 = tcg_temp_new_i64();
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
}

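/*
 * A worked example of the page-bound arithmetic above: since
 * TARGET_PAGE_MASK == ~(TARGET_PAGE_SIZE - 1), OR-ing the PC with the
 * mask leaves the in-page offset under all-ones high bits, so the
 * two's-complement negation yields exactly the bytes left on the page:
 *
 *     -(pc | ~(SIZE - 1)) == SIZE - (pc & (SIZE - 1))    (mod 2^32)
 *
 * e.g. with SIZE 0x1000 and pc_first 0x8fe8, -(0x8fe8 | 0xfffff000)
 * is 0x18: six 4-byte ARM insns remain, which becomes the cap applied
 * to max_insns.
 */
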
static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}

static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->pc += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}

static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_swstep_exception(dc, 0, 0);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}

static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}

static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    dc->pc_curr = dc->pc;
    insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
    dc->insn = insn;
    dc->pc += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA.  We performed the cross-page check
       in init_disas_context by adjusting max_insns.  */
}

static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /* Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /* HLT: v8A only. This is unconditional even when it is going to
         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
         * For v7 cores this was a plain old undefined encoding and so
         * honours its cc check. (We might be using the encoding as
         * a semihosting trap, but we don't change the cc check behaviour
         * on that account, because a debugger connected to a real v7A
         * core and emulating semihosting traps by catching the UNDEF
         * exception would also only see cases where the cc check passed.
         * No guest code should be trying to do a HLT semihosting trap
         * in an IT block anyway.
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}

static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    dc->pc_curr = dc->pc;
    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, dc->pc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        /*
         * Conditionally skip the insn. Note that both 0xe and 0xf mean
         * "always"; 0xf is not "never".
         */
        if (cond < 0x0e) {
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

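/*
 * Illustrative sketch of the condexec advance performed above, as a
 * standalone state machine (hypothetical helper, mirroring the code in
 * thumb_tr_translate_insn): the 4-bit base condition is topped up with
 * the mask's leading bit for each insn, and the 5-bit mask shifts left
 * once per insn until only the terminating 1-bit has drained out, at
 * which point the state returns to "no IT block".
 */
static inline void it_advance(uint32_t *cond, uint32_t *mask)
{
    if (*mask) {
        *cond = (*cond & 0xe) | ((*mask >> 4) & 1);
        *mask = (*mask << 1) & 0x1f;
        if (*mask == 0) {
            *cond = 0;
        }
    }
}
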
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}

static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    const TranslatorOps *ops = &arm_translator_ops;

    if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb, max_insns);
}

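/*
 * restore_state_to_opc() below unpacks the three per-insn words that
 * arm_tr_insn_start() records:
 *   data[0]: the PC (regs[15] for AArch32, pc for AArch64)
 *   data[1]: the packed condexec bits, (cond << 4) | (mask >> 1)
 *   data[2]: the syndrome word, emitted as 0 at insn start and, as I read
 *            the surrounding code, filled in later for insns that can
 *            fault with a syndrome
 * so that an exception can rewind CPU state to the faulting insn.
 */
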
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}