/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"
#include "exec/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}

static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


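/* Copy var into CPSR, writing only the CPSR bits selected by mask.
 * var is not freed by this.
 */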
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

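/* Dual signed 16x16->32 multiply (as used by the SMUAD/SMUSD family):
 * on return, a holds the product of the two low halfwords and b the
 * product of the two high halfwords.
 */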
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

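/* Variable shift by register, with AArch32 semantics: the shift amount
 * is the bottom byte of t1, so LSL/LSR by 32 or more must yield zero.
 * The movcond selects a zero input in that case before shifting by
 * (amount & 0x1f).
 */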
#define GEN_SHIFT(name) \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
{ \
    TCGv_i32 tmp1, tmp2, tmp3; \
    tmp1 = tcg_temp_new_i32(); \
    tcg_gen_andi_i32(tmp1, t1, 0xff); \
    tmp2 = tcg_const_i32(0); \
    tmp3 = tcg_const_i32(0x1f); \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
    tcg_temp_free_i32(tmp3); \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
    tcg_gen_##name##_i32(dest, tmp2, tmp1); \
    tcg_temp_free_i32(tmp2); \
    tcg_temp_free_i32(tmp1); \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

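/* Variable arithmetic shift right: shift amounts of 32 or more are
 * clamped to 31, which fills the result with copies of the sign bit
 * as the architecture requires.
 */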
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

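/* dest = |src|.  There is no 32-bit abs op available here, so compute
 * it with a negate and a movcond on the sign.
 */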
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

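/* Set CF to bit SHIFT of var, i.e. the last bit shifted out by an
 * immediate shift.
 */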
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

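/* Generators for the ARMv6 parallel add/subtract (SIMD) instructions.
 * PAS_OP expands to a switch over the sub-opcode; the pfx argument
 * selects the helper family (signed/unsigned GE-setting, saturating,
 * halving).
 */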
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}

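/* Nonzero entries mark the data processing ops that set the condition
 * codes logically (N and Z from the result) rather than from an
 * arithmetic add or subtract.
 */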
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

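/* Sync the translator's IT-block state back into the condexec_bits
 * field of CPUARMState.
 */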
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}

static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
                                     TCGv_i32 a32, int index) \
{ \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
} \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
                                           TCGv_i32 val, \
                                           TCGv_i32 a32, int index, \
                                           ISSInfo issinfo) \
{ \
    gen_aa32_ld##SUFF(s, val, a32, index); \
    disas_set_da_iss(s, OPC, issinfo); \
}

#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
                                     TCGv_i32 a32, int index) \
{ \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
} \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
                                           TCGv_i32 val, \
                                           TCGv_i32 a32, int index, \
                                           ISSInfo issinfo) \
{ \
    gen_aa32_st##SUFF(s, val, a32, index); \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
}

static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}

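/* Instantiate the sized load/store generators (gen_aa32_ld8u(),
 * gen_aa32_st32_iss(), etc) used throughout the decoder.
 */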
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

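/* Add to var the addressing-mode offset of an A32 load/store insn:
 * either a 12-bit immediate or a shifted register, with bit 23
 * selecting add versus subtract.
 */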
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

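/* Return a pointer to the float_status to use: the Neon "standard FP
 * status" when neon is set, otherwise the normal VFP fp_status.
 */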
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}

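/* Return the byte offset within CPUARMState of VFP register reg;
 * dp selects double-precision (D) versus single-precision (S)
 * register numbering.
 */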
static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}

static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

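/* Load/store a complete 64-bit Neon D register to or from a TCGv_i64. */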
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

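/* Bit 20 of a coprocessor access insn is the direction (load versus
 * store) bit.
 */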
#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

1719#define IWMMXT_OP(name) \
1720static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1721{ \
1722 iwmmxt_load_reg(cpu_V1, rn); \
1723 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1724}
1725
477955bd
PM
1726#define IWMMXT_OP_ENV(name) \
1727static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1728{ \
1729 iwmmxt_load_reg(cpu_V1, rn); \
1730 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1731}
1732
1733#define IWMMXT_OP_ENV_SIZE(name) \
1734IWMMXT_OP_ENV(name##b) \
1735IWMMXT_OP_ENV(name##w) \
1736IWMMXT_OP_ENV(name##l)
e677137d 1737
477955bd 1738#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1739static inline void gen_op_iwmmxt_##name##_M0(void) \
1740{ \
477955bd 1741 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1742}
1743
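/* As an example of the macros above, IWMMXT_OP_ENV(packuw) below expands to:
 *
 *   static inline void gen_op_iwmmxt_packuw_M0_wRn(int rn)
 *   {
 *       iwmmxt_load_reg(cpu_V1, rn);
 *       gen_helper_iwmmxt_packuw(cpu_M0, cpu_env, cpu_M0, cpu_V1);
 *   }
 */
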
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
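
/* wCon bit 1 is the MUP (wMMX register file updated) flag and bit 0 the
 * CUP (control register updated) flag; every translated insn that writes
 * a wR or control register ORs the matching bit in via the two helpers
 * above.
 */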

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
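
/* Example of the addressing modes handled above: a pre-indexed access
 * (bit 24 set) computes dest = Rn +/- offset first and writes it back to
 * Rn only when bit 21 (writeback) is set; a post-indexed access (bit 24
 * clear, bit 21 set) uses the original Rn as dest and always writes the
 * updated address back.
 */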

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
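
/* The shift amount used by the WSLL/WSRL/WSRA/WROR family comes either
 * from one of the wCGR0-3 control registers (insn bit 8 set) or from the
 * low word of a wR register; either way it is masked to the range the
 * caller passes in before being handed to the helper.
 */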

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                         /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                            /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                          /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {                     /* WLDRD */
                        gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else {                                    /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {                     /* WLDRH */
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                    } else {                                    /* WLDRB */
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                          /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {                     /* WSTRD */
                        gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
                    } else {                                    /* WSTRW wRd */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) {                     /* WSTRH */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                    } else {                                    /* WSTRB */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                                 /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                                 /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                                 /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                                 /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                                 /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        /* WANDN is a bitwise AND with complement, so invert M0, not negate */
        tcg_gen_not_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                                 /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                                     /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:             /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:             /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:             /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:             /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:             /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:             /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:             /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:             /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:             /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            tmp2 = NULL;
            tmp3 = NULL;
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:             /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:             /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:             /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:             /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
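    /* Note for TANDC above (and TORC below): wCASF holds a 4-bit NZCV
     * group per SIMD field, so the loops fold all fields into the top
     * nibble -- seven 4-bit shifts for 8 byte fields, three 8-bit shifts
     * for 4 halfword fields, one 16-bit shift for 2 word fields -- before
     * gen_set_nzcv() consumes bits [31:28].
     */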
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:             /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:             /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:             /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:             /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:             /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:             /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:             /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:             /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:             /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:             /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:             /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:             /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:             /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:             /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:             /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:             /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:             /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                               /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                               /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:                 /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}

/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                               /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                               /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                               /* MIABB */
        case 0xd:                                               /* MIABT */
        case 0xe:                                               /* MIATB */
        case 0xf:                                               /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                             /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                                /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}

#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
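
/* For example, VFP_SREG_D(insn) yields the single-precision register
 * number Sd = (insn[15:12] << 1) | insn[22], while VFP_DREG_D gives
 * Dd = (insn[22] << 4) | insn[15:12] on VFP3 and rejects a set insn[22]
 * on earlier VFP, which only has 16 double registers.
 */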

/* Move between integer and VFP cores.  */
static TCGv_i32 gen_vfp_mrs(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_u8(TCGv_i32 var, int shift)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
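
/* Examples of the replication helpers above:
 *   gen_neon_dup_u8(var, 0):   0x000000ab -> 0xabababab
 *   gen_neon_dup_low16(var):   0x0000abcd -> 0xabcdabcd
 *   gen_neon_dup_high16(var):  0xabcd0000 -> 0xabcdabcd
 */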

static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}

static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
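
/* The gt case above needs two movconds because TCG has no compound
 * conditions: the first select handles Z, then the result is overridden
 * with frm whenever N != V, which together implements !Z && N == V.
 */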

static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
                            uint32_t rm, uint32_t dp)
{
    uint32_t vmin = extract32(insn, 6, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return 0;
}

static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}
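
/* gen_helper_set_rmode() returns the previous rounding mode into its
 * destination, so calling it a second time with the same temp (as both
 * handle_vrint above and handle_vcvt below do) restores the mode that
 * was active before the instruction.
 */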

static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}

/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
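
/* For example, an encoding with insn[17:16] == 0b00 (the index the
 * callers below extract) selects FPROUNDING_TIEAWAY, while 0b01 selects
 * FPROUNDING_TIEEVEN, matching the FPDecodeRM() table referenced above.
 */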

static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}

/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
            return 1;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(s, insn);
    }

    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
                    return 1;
                }

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        int vec_size = pass ? 16 : 8;
                        tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0),
                                             vec_size, vec_size, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            if (IS_USER(s)) {
                                return 1;
                            }
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
                                 ((rn & 0x1e) == 0x6))) {
                    /* Integer or single/half precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3615 ((rn & 0x1e) == 0x4))) {
3616 /* VCVT from int or half precision is always from S reg
3617 * regardless of dp bit. VCVT with immediate frac_bits
3618 * has same format as SREG_M.
04595bf6
PM
3619 */
3620 rm = VFP_SREG_M(insn);
b7bcbe95 3621 } else {
9ee6e8bb 3622 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3623 }
3624 } else {
9ee6e8bb 3625 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3626 if (op == 15 && rn == 15) {
3627 /* Double precision destination. */
9ee6e8bb
PB
3628 VFP_DREG_D(rd, insn);
3629 } else {
3630 rd = VFP_SREG_D(insn);
3631 }
04595bf6
PM
3632 /* NB that we implicitly rely on the encoding for the frac_bits
3633 * in VCVT of fixed to float being the same as that of an SREG_M
3634 */
9ee6e8bb 3635 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3636 }
3637
69d1fc22 3638 veclen = s->vec_len;
b7bcbe95
FB
3639 if (op == 15 && rn > 3)
3640 veclen = 0;
3641
3642 /* Shut up compiler warnings. */
3643 delta_m = 0;
3644 delta_d = 0;
3645 bank_mask = 0;
3b46e624 3646
b7bcbe95
FB
3647 if (veclen > 0) {
3648 if (dp)
3649 bank_mask = 0xc;
3650 else
3651 bank_mask = 0x18;
3652
3653 /* Figure out what type of vector operation this is. */
3654 if ((rd & bank_mask) == 0) {
3655 /* scalar */
3656 veclen = 0;
3657 } else {
3658 if (dp)
69d1fc22 3659 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3660 else
69d1fc22 3661 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3662
3663 if ((rm & bank_mask) == 0) {
3664 /* mixed scalar/vector */
3665 delta_m = 0;
3666 } else {
3667 /* vector */
3668 delta_m = delta_d;
3669 }
3670 }
3671 }
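            /*
             * Illustrative note (added commentary, not from the original
             * source): delta_d/delta_m are the per-iteration register
             * strides and bank_mask selects the 8-register (single) or
             * 4-register (double) bank. E.g. a single-precision op on s10
             * with vec_stride 1 gives delta_d == 2 and steps
             * s10 -> s12 -> s14 via the wrap-around arithmetic in the
             * loop below.
             */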
            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
                        !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    if (!extract32(rn, 1, 1)) {
                        /* Half precision source.  */
                        gen_mov_F0_vreg(0, rm);
                        break;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct: an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(false);
                        TCGv_i32 ahp_mode = get_ahp_flag();
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           fpst, ahp_mode);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           fpst, ahp_mode);
                        }
                        tcg_temp_free_i32(ahp_mode);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(tmp);
                        break;
                    }
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(false);
                        TCGv_i32 ahp = get_ahp_flag();
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           fpst, ahp);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           fpst, ahp);
                        }
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(ahp);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(false);
                        TCGv_i32 ahp = get_ahp_flag();
                        tmp = tcg_temp_new_i32();

                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           fpst, ahp);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           fpst, ahp);
                        }
                        tcg_temp_free_i32(ahp);
                        tcg_temp_free_ptr(fpst);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    }
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(false);
                        TCGv_i32 ahp = get_ahp_flag();
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           fpst, ahp);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           fpst, ahp);
                        }
                        tcg_temp_free_i32(ahp);
                        tcg_temp_free_ptr(fpst);
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    }
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 13: /* vrintz */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11)) {
                    /* Comparison, do nothing.  */
                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
                                              (rn & 0x1e) == 0x6)) {
                    /* VCVT double to int: always integer result.
                     * VCVT double to half precision is always a single
                     * precision result.
                     */
                    gen_mov_vreg_F0(0, rd);
                } else if (op == 15 && rn == 15) {
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                } else {
                    gen_mov_vreg_F0(dp, rd);
                }

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U, W == 1 => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (s->v8m_stackcheck && rn == 13 && w) {
                    /*
                     * Here 'addr' is the lowest address we will store to,
                     * and is either the old SP (if post-increment) or
                     * the new SP (if pre-decrement). For post-increment
                     * where the old value is below the limit and the new
                     * value is above, it is UNKNOWN whether the limit check
                     * triggers; we choose to trigger.
                     */
                    gen_helper_v8m_stackcheck(cpu_env, addr);
                }

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
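
/*
 * Illustrative sketch (not part of the original file): the 'fconst' case
 * above expands an 8-bit VFP immediate abcdefgh into the single-precision
 * pattern a:NOT(b):bbbbb:cdefgh:Zeros(19). A standalone model of that
 * expansion, under a hypothetical name:
 */
static inline uint32_t vfp_expand_imm32_sketch(uint32_t imm8)
{
    uint32_t a = (imm8 >> 7) & 1;
    uint32_t b = (imm8 >> 6) & 1;

    return (a << 31)                      /* sign */
         | ((b ^ 1) << 30)                /* NOT(b) */
         | ((b ? 0x1fu : 0) << 25)        /* Replicate(b, 5) */
         | ((imm8 & 0x3f) << 19);         /* cdefgh, then 19 zeros */
}
/* vfp_expand_imm32_sketch(0x70) == 0x3f800000, i.e. 1.0f. */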

static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
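
/*
 * Illustrative sketch (not part of the original file): direct chaining
 * with goto_tb patches a jump between translated blocks, which is only
 * safe while both ends live on the same guest page; a cross-page
 * destination could be remapped independently of this TB. Assuming
 * hypothetical 4K pages, the check reduces to:
 */
static inline bool same_guest_page_sketch(uint32_t pc, uint32_t dest)
{
    const uint32_t page_mask = ~0xfffu;   /* hypothetical TARGET_PAGE_MASK */

    return (pc & page_mask) == (dest & page_mask);
}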

static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}

/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}

static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
    }
}

static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}

/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality */
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
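
/*
 * Illustrative sketch (not part of the original file): the four 'flags'
 * bits above select byte lanes of the PSR, so MSR CPSR_f (flags == 0x8)
 * yields 0xff000000 before the feature-based trimming.
 */
static inline uint32_t psr_lane_mask_sketch(int flags)
{
    uint32_t mask = 0;
    int i;

    for (i = 0; i < 4; i++) {
        if (flags & (1 << i)) {
            mask |= 0xffu << (8 * i);
        }
    }
    return mask;
}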

/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
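
/*
 * Illustrative sketch (not part of the original file): the SPSR update
 * emitted above is a masked merge, keeping the unmasked bits of the old
 * value and taking the masked bits from the new one.
 */
static inline uint32_t psr_merge_sketch(uint32_t old, uint32_t val,
                                        uint32_t mask)
{
    return (old & ~mask) | (val & mask);
}
/* psr_merge_sketch(0x600001d3, 0x10000000, 0xf0000000) == 0x100001d3 */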

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}

static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /*
         * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
         * (and so we can forbid accesses from EL2 or below). elr_hyp
         * can be accessed also from Hyp mode, so forbid accesses from
         * EL0 or EL1.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
            (s->current_el < 3 && *regno != 17)) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}

static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}

static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}

/* Store value to PC as for an exception return (i.e. don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}

/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}

#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
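
/*
 * Illustrative sketch (not part of the original file): both macros above
 * dispatch on the key (size << 1) | u, where size selects the element
 * width (0 = 8-bit, 1 = 16-bit, 2 = 32-bit) and u selects unsigned.
 */
static inline const char *neon_int_op_suffix_sketch(int size, int u)
{
    static const char *const suffixes[6] = {
        "s8", "u8", "s16", "u16", "s32", "u32"
    };
    int key = (size << 1) | u;

    return key < 6 ? suffixes[key] : "invalid";
}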

static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}

static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qunzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qunzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_unzip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}

static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_zip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}

static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
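
/*
 * Illustrative sketch (not part of the original file): a scalar model of
 * the 16-bit VTRN step implemented with TCG ops above.
 */
static inline void neon_trn_u16_sketch(uint32_t *t0, uint32_t *t1)
{
    uint32_t rd = (*t0 << 16) | (*t1 & 0xffff);

    *t1 = (*t1 >> 16) | (*t0 & 0xffff0000);
    *t0 = rd;
}
/*
 * With *t0 = 0x11112222 and *t1 = 0x33334444 this leaves
 * *t0 = 0x22224444 and *t1 = 0x11113333: the halfword lanes
 * have been transposed.
 */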

static struct {
    int nregs;
    int interleave;
    int spacing;
} const neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
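
/*
 * Illustrative sketch (not part of the original file): for the "load/store
 * all elements" forms decoded below, the byte stride between element groups
 * is (1 << size) * interleave. For example VLD2, multiple 2-element
 * structures (op == 8), with 16-bit elements (size == 1) has interleave 2
 * and hence a stride of 4 bytes.
 */
static inline int neon_ls_stride_sketch(int op, int size)
{
    return (1 << size) * neon_ls_element_type[op].interleave;
}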

/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            tmp2 = NULL;
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
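
/*
 * Illustrative sketch (not part of the original file): the size == 0 load
 * path above assembles four individually loaded bytes into one 32-bit lane
 * in little-endian order, byte n landing at bit position 8 * n.
 */
static inline uint32_t assemble_bytes_le_sketch(const uint8_t b[4])
{
    uint32_t val = 0;
    int n;

    for (n = 0; n < 4; n++) {
        val |= (uint32_t)b[n] << (n * 8);
    }
    return val;
}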

static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
5430
39d5492a
PM
5431static void gen_neon_narrow_op(int op, int u, int size,
5432 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5433{
5434 if (op) {
5435 if (u) {
5436 gen_neon_unarrow_sats(size, dest, src);
5437 } else {
5438 gen_neon_narrow(size, dest, src);
5439 }
5440 } else {
5441 if (u) {
5442 gen_neon_narrow_satu(size, dest, src);
5443 } else {
5444 gen_neon_narrow_sats(size, dest, src);
5445 }
5446 }
5447}
5448
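/*
 * Editorial note (not in the original source): gen_neon_narrow_op()
 * picks one of the four narrowing flavours above:
 *     op=1, u=0 -> plain truncation            (gen_neon_narrow)
 *     op=1, u=1 -> signed in, unsigned sat out (gen_neon_unarrow_sats)
 *     op=0, u=0 -> signed saturating           (gen_neon_narrow_sats)
 *     op=0, u=1 -> unsigned saturating         (gen_neon_narrow_satu)
 * In the shift-and-narrow decode later in this file it is called as
 * gen_neon_narrow_op(op == 8, u, ...), i.e. 'op == 8' supplies the op
 * flag for the VSHRN/VRSHRN/VQSHRUN group.
 */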
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};

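/*
 * Editorial note (not in the original source): each entry above is a
 * bitmap over the 2-bit size field, so the decoder can reject bad
 * op/size combinations with a single test, as done below:
 *
 *     if ((neon_3r_sizes[op] & (1 << size)) == 0) {
 *         return 1;   -- UNDEF
 *     }
 *
 * 0x7 allows sizes 0-2 (8/16/32-bit elements), 0xf also allows size 3
 * (64-bit), and entries such as 0x5 reuse size bit 1 as an opcode bit.
 */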
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};


/* Expand v8.1 simd helper.  */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}

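/*
 * Editorial note (not in the original source): do_v81_helper() is the
 * common expander for the v8.1 RDM insns (VQRDMLAH/VQRDMLSH): it gates
 * on the ID-register feature bit and then emits a single gvec call
 * covering the whole register, e.g. (as used later in this file):
 *
 *     return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16, q, rd, rn, rm);
 *
 * opr_sz is 8 bytes for a D register (q=0) and 16 for a Q register (q=1).
 */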
/*
 * Expanders for VBitOps_VBIF, VBIT, VBSL.
 */
static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rm);
    tcg_gen_and_i64(rn, rn, rd);
    tcg_gen_xor_i64(rd, rm, rn);
}

static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_and_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_andc_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rm);
    tcg_gen_and_vec(vece, rn, rn, rd);
    tcg_gen_xor_vec(vece, rd, rm, rn);
}

static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_and_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_andc_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

const GVecGen3 bsl_op = {
    .fni8 = gen_bsl_i64,
    .fniv = gen_bsl_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

const GVecGen3 bit_op = {
    .fni8 = gen_bit_i64,
    .fniv = gen_bit_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

const GVecGen3 bif_op = {
    .fni8 = gen_bif_i64,
    .fniv = gen_bif_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

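/*
 * Editorial note (not in the original source): the six expanders above
 * all use the same xor trick to implement a bitwise select without a
 * dedicated bitselect op.  For VBSL:
 *     rd = rm ^ ((rn ^ rm) & rd)
 * selects, bit by bit, rn where rd is 1 and rm where rd is 0.  Worked
 * example with rd = 0xf0, rn = 0xab, rm = 0xcd:
 *     rn ^ rm = 0x66;  & rd = 0x60;  ^ rm = 0xad
 * i.e. the high nibble comes from rn and the low nibble from rm.
 * VBIT and VBIF are the same computation with the operand roles
 * permuted (rm supplies the selector mask), and .load_dest tells the
 * gvec expander that rd is an input as well as an output.
 */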
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int vec_size;
    uint32_t imm, mask;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_ptr ptr1, ptr2, ptr3;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    vec_size = q ? 16 : 8;
    rd_ofs = neon_reg_offset(rd, 0);
    rn_ofs = neon_reg_offset(rn, 0);
    rm_ofs = neon_reg_offset(rm, 0);

    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        switch (op) {
        case NEON_3R_SHA:
            /* The SHA-1/SHA-256 3-register instructions require special
             * treatment here, as their size field is overloaded as an
             * op type selector, and they all consume their input in a
             * single pass.
             */
            if (!q) {
                return 1;
            }
            if (!u) { /* SHA-1 */
                if (!dc_isar_feature(aa32_sha1, s)) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                tmp4 = tcg_const_i32(size);
                gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
                tcg_temp_free_i32(tmp4);
            } else { /* SHA-256 */
                if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                switch (size) {
                case 0:
                    gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
                    break;
                case 1:
                    gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
                    break;
                case 2:
                    gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
                    break;
                }
            }
            tcg_temp_free_ptr(ptr1);
            tcg_temp_free_ptr(ptr2);
            tcg_temp_free_ptr(ptr3);
            return 0;

        case NEON_3R_VPADD_VQRDMLAH:
            if (!u) {
                break;  /* VPADD */
            }
            /* VQRDMLAH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_VFM_VQRDMLSH:
            if (!u) {
                /* VFMA, VFMS */
                if (size == 1) {
                    return 1;
                }
                break;
            }
            /* VQRDMLSH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_LOGIC: /* Logic ops.  */
            switch ((u << 2) | size) {
            case 0: /* VAND */
                tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 1: /* VBIC */
                tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
                break;
            case 2:
                if (rn == rm) {
                    /* VMOV */
                    tcg_gen_gvec_mov(0, rd_ofs, rn_ofs, vec_size, vec_size);
                } else {
                    /* VORR */
                    tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
                                    vec_size, vec_size);
                }
                break;
            case 3: /* VORN */
                tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 4: /* VEOR */
                tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 5: /* VBSL */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bsl_op);
                break;
            case 6: /* VBIT */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bit_op);
                break;
            case 7: /* VBIF */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bif_op);
                break;
            }
            return 0;

        case NEON_3R_VADD_VSUB:
            if (u) {
                tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else {
                tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            }
            return 0;
        }
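        /*
         * Editorial note (not in the original source): the cases above
         * (SHA, VQRDMLAH/VQRDMLSH, logic ops, VADD/VSUB) return directly
         * because they have already been expanded as whole-vector gvec
         * or crypto operations; only the remaining ops fall through to
         * the per-pass scalar loops below.  For a Q-register VADD.I32,
         * for example, the entire 128-bit add is emitted by the single
         * call tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs, 16, 16),
         * where size == 2 corresponds to 32-bit elements (MO_32).
         */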
        if (size == 3) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VQADD:
                    if (u) {
                        gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VQSUB:
                    if (u) {
                        gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case NEON_3R_VSHL:
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
            }
            break;
        case NEON_3R_VPADD_VQRDMLAH:
        case NEON_3R_VPMAX:
        case NEON_3R_VPMIN:
            pairwise = 1;
            break;
        case NEON_3R_FLOAT_ARITH:
            pairwise = (u && size < 2); /* if VPADD (float) */
            break;
        case NEON_3R_FLOAT_MINMAX:
            pairwise = u; /* if VPMIN/VPMAX (float) */
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u && size) {
                /* no encoding for U=0 C=1x */
                return 1;
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (!u) {
                return 1;
            }
            break;
        case NEON_3R_FLOAT_MISC:
            /* VMAXNM/VMINNM in ARMv8 */
            if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
                return 1;
            }
            break;
        case NEON_3R_VMUL:
            if (u && (size != 0)) {
                /* UNDEF on invalid size for polynomial subcase */
                return 1;
            }
            break;
        case NEON_3R_VFM_VQRDMLSH:
            if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                return 1;
            }
            break;
        default:
            break;
        }

        if (pairwise && q) {
            /* All the pairwise insns UNDEF if Q is set */
            return 1;
        }

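        /*
         * Editorial note (not in the original source): in the loop below,
         * GEN_NEON_INTEGER_OP(name) is a dispatch macro defined earlier
         * in this file; GEN_NEON_INTEGER_OP(hadd), for instance, picks
         * one of gen_helper_neon_hadd_{s8,u8,s16,u16,s32,u32}(tmp, tmp,
         * tmp2) based on the current 'size' and 'u' values, and the _ENV
         * variant additionally passes cpu_env so that saturating helpers
         * can set the QC flag.
         */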
        for (pass = 0; pass < (q ? 4 : 2); pass++) {

            if (pairwise) {
                /* Pairwise.  */
                if (pass < 1) {
                    tmp = neon_load_reg(rn, 0);
                    tmp2 = neon_load_reg(rn, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                }
            } else {
                /* Elementwise.  */
                tmp = neon_load_reg(rn, pass);
                tmp2 = neon_load_reg(rm, pass);
            }
            switch (op) {
            case NEON_3R_VHADD:
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case NEON_3R_VQADD:
                GEN_NEON_INTEGER_OP_ENV(qadd);
                break;
            case NEON_3R_VRHADD:
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case NEON_3R_VHSUB:
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case NEON_3R_VQSUB:
                GEN_NEON_INTEGER_OP_ENV(qsub);
                break;
            case NEON_3R_VCGT:
                GEN_NEON_INTEGER_OP(cgt);
                break;
            case NEON_3R_VCGE:
                GEN_NEON_INTEGER_OP(cge);
                break;
            case NEON_3R_VSHL:
                GEN_NEON_INTEGER_OP(shl);
                break;
            case NEON_3R_VQSHL:
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case NEON_3R_VRSHL:
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case NEON_3R_VQRSHL:
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case NEON_3R_VMAX:
                GEN_NEON_INTEGER_OP(max);
                break;
            case NEON_3R_VMIN:
                GEN_NEON_INTEGER_OP(min);
                break;
            case NEON_3R_VABD:
                GEN_NEON_INTEGER_OP(abd);
                break;
            case NEON_3R_VABA:
                GEN_NEON_INTEGER_OP(abd);
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                gen_neon_add(size, tmp, tmp2);
                break;
            case NEON_3R_VTST_VCEQ:
                if (!u) { /* VTST */
                    switch (size) {
                    case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                } else { /* VCEQ */
                    switch (size) {
                    case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                    case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
                switch (size) {
                case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                if (u) { /* VMLS */
                    gen_neon_rsb(size, tmp, tmp2);
                } else { /* VMLA */
                    gen_neon_add(size, tmp, tmp2);
                }
                break;
            case NEON_3R_VMUL:
                if (u) { /* polynomial */
                    gen_helper_neon_mul_p8(tmp, tmp, tmp2);
                } else { /* Integer */
                    switch (size) {
                    case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                    case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                    case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPMAX:
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case NEON_3R_VPMIN:
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high.  */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPADD_VQRDMLAH:
                switch (size) {
                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                break;
            case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic.  */
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                switch ((u << 2) | size) {
                case 0: /* VADD */
                case 4: /* VPADD */
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    break;
                case 2: /* VSUB */
                    gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
                    break;
                default:
                    abort();
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MULTIPLY:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                if (!u) {
                    tcg_temp_free_i32(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    if (size == 0) {
                        gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_CMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_ACMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MINMAX:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MISC:
                if (u) {
                    /* VMAXNM/VMINNM */
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    if (size == 0) {
                        gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
                    }
                    tcg_temp_free_ptr(fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                    } else {
                        gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                    }
                }
                break;
            case NEON_3R_VFM_VQRDMLSH:
            {
                /* VFMA, VFMS: fused multiply-add */
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                TCGv_i32 tmp3 = neon_load_reg(rd, pass);
                if (size) {
                    /* VFMS */
                    gen_helper_vfp_negs(tmp, tmp);
                }
                gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
                tcg_temp_free_i32(tmp3);
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            default:
                abort();
            }
            tcg_temp_free_i32(tmp2);

            /* Save the result.  For elementwise operations we can put it
               straight into the destination register.  For pairwise operations
               we have to be careful to avoid clobbering the source operands.  */
            if (pairwise && rd == rm) {
                neon_store_scratch(pass, tmp);
            } else {
                neon_store_reg(rd, pass, tmp);
            }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift.  */
                if (op > 7) {
                    return 1;
                }
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                if (q && ((rd | rm) & 1)) {
                    return 1;
                }
                if (!u && (op == 4 || op == 6)) {
                    return 1;
                }
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4)
                    shift = shift - (1 << (size + 3));
                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }
                switch (size) {
                case 0:
                    imm = (uint8_t) shift;
                    imm |= imm << 8;
                    imm |= imm << 16;
                    break;
                case 1:
                    imm = (uint16_t) shift;
                    imm |= imm << 16;
                    break;
                case 2:
                case 3:
                    imm = shift;
                    break;
                default:
                    abort();
                }

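                /*
                 * Editorial note (not in the original source): the switch
                 * above replicates the (possibly negative) shift count into
                 * every element of a 32-bit immediate so that one variable-
                 * shift helper can shift all lanes at once.  E.g. for
                 * size == 0 (8-bit lanes) and an effective shift of -3
                 * (a right shift by 3):
                 *     imm = 0xfd -> 0xfdfd -> 0xfdfdfdfd
                 * i.e. each byte lane holds -3 as a signed 8-bit value.
                 */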
                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 0: /* VSHR */
                        case 1: /* VSRA */
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 4: /* VSRI */
                        case 5: /* VSHL, VSLI */
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHLU */
                            gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
                                                      cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHL */
                            if (u) {
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            }
                            break;
                        }
                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            neon_load_reg64(cpu_V1, rd + pass);
                            uint64_t mask;
                            if (shift < -63 || shift > 63) {
                                mask = 0;
                            } else {
                                if (op == 4) {
                                    mask = 0xffffffffffffffffull >> -shift;
                                } else {
                                    mask = 0xffffffffffffffffull << shift;
                                }
                            }
                            tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
                            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1.  */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 0: /* VSHR */
                        case 1: /* VSRA */
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 4: /* VSRI */
                        case 5: /* VSHL, VSLI */
                            switch (size) {
                            case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                            break;
                        case 6: /* VQSHLU */
                            switch (size) {
                            case 0:
                                gen_helper_neon_qshlu_s8(tmp, cpu_env,
                                                         tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_qshlu_s16(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            case 2:
                                gen_helper_neon_qshlu_s32(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            default:
                                abort();
                            }
                            break;
                        case 7: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        }
                        tcg_temp_free_i32(tmp2);

                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            switch (size) {
                            case 0:
                                if (op == 4)
                                    mask = 0xff >> -shift;
                                else
                                    mask = (uint8_t)(0xff << shift);
                                mask |= mask << 8;
                                mask |= mask << 16;
                                break;
                            case 1:
                                if (op == 4)
                                    mask = 0xffff >> -shift;
                                else
                                    mask = (uint16_t)(0xffff << shift);
                                mask |= mask << 16;
                                break;
                            case 2:
                                if (shift < -31 || shift > 31) {
                                    mask = 0;
                                } else {
                                    if (op == 4)
                                        mask = 0xffffffffu >> -shift;
                                    else
                                        mask = 0xffffffffu << shift;
                                }
                                break;
                            default:
                                abort();
                            }
                            tmp2 = neon_load_reg(rd, pass);
                            tcg_gen_andi_i32(tmp, tmp, mask);
                            tcg_gen_andi_i32(tmp2, tmp2, ~mask);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
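                /*
                 * Editorial note (not in the original source): for the
                 * insert ops the mask marks the bits that come from the
                 * shifted source; destination bits outside it are kept.
                 * E.g. VSRI.8 #3 (op == 4, effective shift -3):
                 *     mask = 0xff >> 3 = 0x1f, replicated to 0x1f1f1f1f,
                 * so the low 5 bits of each byte take the shifted value
                 * while the top 3 bits of the destination are preserved.
                 */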
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                int input_unsigned = (op == 8) ? !u : u;
                if (rm & 1) {
                    return 1;
                }
                shift = shift - (1 << (size + 3));
                size++;
                if (size == 3) {
                    tmp64 = tcg_const_i64(shift);
                    neon_load_reg64(cpu_V0, rm);
                    neon_load_reg64(cpu_V1, rm + 1);
                    for (pass = 0; pass < 2; pass++) {
                        TCGv_i64 in;
                        if (pass == 0) {
                            in = cpu_V0;
                        } else {
                            in = cpu_V1;
                        }
                        if (q) {
                            if (input_unsigned) {
                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
                            }
                        } else {
                            if (input_unsigned) {
                                gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
                            }
                        }
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (size == 1) {
                        imm = (uint16_t)shift;
                        imm |= imm << 16;
                    } else {
                        /* size == 2 */
                        imm = (uint32_t)shift;
                    }
                    tmp2 = tcg_const_i32(imm);
                    tmp4 = neon_load_reg(rm + 1, 0);
                    tmp5 = neon_load_reg(rm + 1, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rm, 0);
                        } else {
                            tmp = tmp4;
                        }
                        gen_neon_shift_narrow(size, tmp, tmp2, q,
                                              input_unsigned);
                        if (pass == 0) {
                            tmp3 = neon_load_reg(rm, 1);
                        } else {
                            tmp3 = tmp5;
                        }
                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                              input_unsigned);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(tmp3);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i32(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL, VMOVL */
                if (q || (rd & 1)) {
                    return 1;
                }
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow
                         * input.  */
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else if (size == 1) {
                                imm = 0xffff >> (16 - shift);
                            } else {
                                /* size == 2 */
                                imm = 0xffffffff >> (32 - shift);
                            }
                            if (size < 2) {
                                imm64 = imm | (((uint64_t)imm) << 32);
                            } else {
                                imm64 = imm;
                            }
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
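                /*
                 * Editorial note (not in the original source): a worked
                 * example of the mask above, assuming size == 0 (8-to-16
                 * bit VSHLL) and shift == 3: imm = 0xff >> 5 = 0x07,
                 * spread to imm64 = 0x0007000700070007, and the andi with
                 * ~imm64 clears the low 3 bits of each 16-bit result
                 * lane, where the neighbouring lane's extension bits
                 * would otherwise have been shifted in.
                 */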
            } else if (op >= 14) {
                /* VCVT fixed-point.  */
                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
                    return 1;
                }
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (!(op & 1)) {
                        if (u)
                            gen_vfp_ulto(0, shift, 1);
                        else
                            gen_vfp_slto(0, shift, 1);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift, 1);
                        else
                            gen_vfp_tosl(0, shift, 1);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            int invert, reg_ofs, vec_size;

            if (q && (rd & 1)) {
                return 1;
            }

            op = (insn >> 8) & 0xf;
            /* One register and immediate.  */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
            /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
             * We choose to not special-case this and will behave as if a
             * valid constant encoding of 0 had been given.
             */
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                if (invert) {
                    imm = ~imm;
                }
                break;
            case 15:
                if (invert) {
                    return 1;
                }
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert) {
                imm = ~imm;
            }

            reg_ofs = neon_reg_offset(rd, 0);
            vec_size = q ? 16 : 8;

            if (op & 1 && op < 12) {
                if (invert) {
                    /* The immediate value has already been inverted,
                     * so BIC becomes AND.
                     */
                    tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
                                      vec_size, vec_size);
                } else {
                    tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
                                     vec_size, vec_size);
                }
            } else {
                /* VMOV, VMVN.  */
                if (op == 14 && invert) {
                    TCGv_i64 t64 = tcg_temp_new_i64();

                    for (pass = 0; pass <= q; ++pass) {
                        uint64_t val = 0;
                        int n;

                        for (n = 0; n < 8; n++) {
                            if (imm & (1 << (n + pass * 8))) {
                                val |= 0xffull << (n * 8);
                            }
                        }
                        tcg_gen_movi_i64(t64, val);
                        neon_store_reg64(t64, rd + pass);
                    }
                    tcg_temp_free_i64(t64);
                } else {
                    tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
                }
            }
        }
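        /*
         * Editorial note (not in the original source): the op == 14
         * invert case implements the per-byte immediate expansion (VMOV
         * with cmode 1110, op 1): each of the 8 (or 16, for a Q register)
         * immediate bits selects a whole 0x00 or 0xff byte.  E.g. the
         * 8-bit immediate 0b01100101 expands to the 64-bit value
         * 0x00ffff0000ff00ff.
         */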
    } else { /* (insn & 0x00800010 == 0x00800000) */
        if (size != 3) {
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths.  */
                int src1_wide;
                int src2_wide;
                int prewiden;
                /* undefreq: bit 0 : UNDEF if size == 0
                 *           bit 1 : UNDEF if size == 1
                 *           bit 2 : UNDEF if size == 2
                 *           bit 3 : UNDEF if U == 1
                 * Note that [2:0] set implies 'always UNDEF'
                 */
                int undefreq;
                /* prewiden, src1_wide, src2_wide, undefreq */
                static const int neon_3reg_wide[16][4] = {
                    {1, 0, 0, 0}, /* VADDL */
                    {1, 1, 0, 0}, /* VADDW */
                    {1, 0, 0, 0}, /* VSUBL */
                    {1, 1, 0, 0}, /* VSUBW */
                    {0, 1, 1, 0}, /* VADDHN */
                    {0, 0, 0, 0}, /* VABAL */
                    {0, 1, 1, 0}, /* VSUBHN */
                    {0, 0, 0, 0}, /* VABDL */
                    {0, 0, 0, 0}, /* VMLAL */
                    {0, 0, 0, 9}, /* VQDMLAL */
                    {0, 0, 0, 0}, /* VMLSL */
                    {0, 0, 0, 9}, /* VQDMLSL */
                    {0, 0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0, 1}, /* VQDMULL */
                    {0, 0, 0, 0xa}, /* Polynomial VMULL */
                    {0, 0, 0, 7}, /* Reserved: always UNDEF */
                };

                prewiden = neon_3reg_wide[op][0];
                src1_wide = neon_3reg_wide[op][1];
                src2_wide = neon_3reg_wide[op][2];
                undefreq = neon_3reg_wide[op][3];

                if ((undefreq & (1 << size)) ||
                    ((undefreq & 8) && u)) {
                    return 1;
                }
                if ((src1_wide && (rn & 1)) ||
                    (src2_wide && (rm & 1)) ||
                    (!src2_wide && (rd & 1))) {
                    return 1;
                }

                /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
                 * outside the loop below as it only performs a single pass.
                 */
                if (op == 14 && size == 2) {
                    TCGv_i64 tcg_rn, tcg_rm, tcg_rd;

                    if (!dc_isar_feature(aa32_pmull, s)) {
                        return 1;
                    }
                    tcg_rn = tcg_temp_new_i64();
                    tcg_rm = tcg_temp_new_i64();
                    tcg_rd = tcg_temp_new_i64();
                    neon_load_reg64(tcg_rn, rn);
                    neon_load_reg64(tcg_rm, rm);
                    gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
                    neon_store_reg64(tcg_rd, rd);
                    gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
                    neon_store_reg64(tcg_rd, rd + 1);
                    tcg_temp_free_i64(tcg_rn);
                    tcg_temp_free_i64(tcg_rm);
                    tcg_temp_free_i64(tcg_rd);
                    return 0;
                }

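                /*
                 * Editorial note (not in the original source): VMULL.P64
                 * is special because its single 64x64-to-128 bit carry-less
                 * multiply fills the whole destination Q register in one
                 * step, whereas every other op in this group processes two
                 * 32-to-64 bit passes; hence the early return above rather
                 * than falling into the per-pass loop below.
                 */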
                /* Avoid overlapping operands.  Wide source operands are
                   always aligned so will never overlap with wide
                   destinations in problematic ways.  */
                if (rd == rm && !src2_wide) {
                    tmp = neon_load_reg(rm, 1);
                    neon_store_scratch(2, tmp);
                } else if (rd == rn && !src1_wide) {
                    tmp = neon_load_reg(rn, 1);
                    neon_store_scratch(2, tmp);
                }
                tmp3 = NULL;
                for (pass = 0; pass < 2; pass++) {
                    if (src1_wide) {
                        neon_load_reg64(cpu_V0, rn + pass);
                        tmp = NULL;
                    } else {
                        if (pass == 1 && rd == rn) {
                            tmp = neon_load_scratch(2);
                        } else {
                            tmp = neon_load_reg(rn, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V0, tmp, size, u);
                        }
                    }
                    if (src2_wide) {
                        neon_load_reg64(cpu_V1, rm + pass);
                        tmp2 = NULL;
                    } else {
                        if (pass == 1 && rd == rm) {
                            tmp2 = neon_load_scratch(2);
                        } else {
                            tmp2 = neon_load_reg(rm, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V1, tmp2, size, u);
                        }
                    }
                    switch (op) {
                    case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
                        gen_neon_addl(size);
                        break;
                    case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
                        gen_neon_subl(size);
                        break;
                    case 5: case 7: /* VABAL, VABDL */
                        switch ((size << 1) | u) {
                        case 0:
                            gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
                            break;
                        case 1:
                            gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
                            break;
                        case 2:
                            gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
                            break;
                        case 3:
                            gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
                            break;
                        case 4:
                            gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
                            break;
                        case 5:
                            gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
                            break;
                        default: abort();
                        }
                        tcg_temp_free_i32(tmp2);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 8: case 9: case 10: case 11: case 12: case 13:
                        /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        break;
                    case 14: /* Polynomial VMULL */
                        gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        tcg_temp_free_i32(tmp);
                        break;
                    default: /* 15 is RESERVED: caught earlier */
                        abort();
                    }
                    if (op == 13) {
                        /* VQDMULL */
                        gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 5 || (op >= 8 && op <= 11)) {
                        /* Accumulate.  */
                        neon_load_reg64(cpu_V1, rd + pass);
                        switch (op) {
                        case 10: /* VMLSL */
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 5: case 8: /* VABAL, VMLAL */
                            gen_neon_addl(size);
                            break;
                        case 9: case 11: /* VQDMLAL, VQDMLSL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 11) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 4 || op == 6) {
                        /* Narrowing operation.  */
                        tmp = tcg_temp_new_i32();
                        if (!u) {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_extrl_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        } else {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_extrl_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        }
                        if (pass == 0) {
                            tmp3 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp3);
                            neon_store_reg(rd, 1, tmp);
                        }
                    } else {
                        /* Write back the result.  */
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                }
            } else {
                /* Two registers and a scalar.  NB that for ops of this form
                 * the ARM ARM labels bit 24 as Q, but it is in our variable
                 * 'u', not 'q'.
                 */
                if (size == 0) {
                    return 1;
                }
                switch (op) {
                case 1: /* Float VMLA scalar */
                case 5: /* Floating point VMLS scalar */
                case 9: /* Floating point VMUL scalar */
                    if (size == 1) {
                        return 1;
                    }
                    /* fall through */
                case 0: /* Integer VMLA scalar */
                case 4: /* Integer VMLS scalar */
                case 8: /* Integer VMUL scalar */
                case 12: /* VQDMULH scalar */
                case 13: /* VQRDMULH scalar */
                    if (u && ((rd | rn) & 1)) {
                        return 1;
                    }
                    tmp = neon_get_scalar(size, rm);
                    neon_store_scratch(0, tmp);
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
                        tmp = neon_load_scratch(0);
                        tmp2 = neon_load_reg(rn, pass);
                        if (op == 12) {
                            if (size == 1) {
                                gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op == 13) {
                            if (size == 1) {
                                gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op & 1) {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                        } else {
                            switch (size) {
                            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                        }
                        tcg_temp_free_i32(tmp2);
                        if (op < 8) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            switch (op) {
                            case 0:
                                gen_neon_add(size, tmp, tmp2);
                                break;
                            case 1:
                            {
                                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                                gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                                tcg_temp_free_ptr(fpstatus);
                                break;
                            }
                            case 4:
                                gen_neon_rsb(size, tmp, tmp2);
                                break;
                            case 5:
                            {
                                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                                gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                                tcg_temp_free_ptr(fpstatus);
                                break;
                            }
                            default:
                                abort();
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                    break;
                case 3: /* VQDMLAL scalar */
                case 7: /* VQDMLSL scalar */
                case 11: /* VQDMULL scalar */
                    if (u == 1) {
                        return 1;
                    }
                    /* fall through */
                case 2: /* VMLAL scalar */
                case 6: /* VMLSL scalar */
                case 10: /* VMULL scalar */
                    if (rd & 1) {
                        return 1;
                    }
                    tmp2 = neon_get_scalar(size, rm);
                    /* We need a copy of tmp2 because gen_neon_mull
                     * deletes it during pass 0.  */
                    tmp4 = tcg_temp_new_i32();
                    tcg_gen_mov_i32(tmp4, tmp2);
                    tmp3 = neon_load_reg(rn, 1);

                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rn, 0);
                        } else {
                            tmp = tmp3;
                            tmp2 = tmp4;
                        }
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        if (op != 11) {
                            neon_load_reg64(cpu_V1, rd + pass);
                        }
                        switch (op) {
                        case 6:
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 2:
                            gen_neon_addl(size);
                            break;
                        case 3: case 7:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 7) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        case 10:
                            /* no-op */
                            break;
                        case 11:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                case 14: /* VQRDMLAH scalar */
                case 15: /* VQRDMLSH scalar */
                {
                    NeonGenThreeOpEnvFn *fn;

                    if (!dc_isar_feature(aa32_rdm, s)) {
                        return 1;
                    }
                    if (u && ((rd | rn) & 1)) {
                        return 1;
                    }
                    if (op == 14) {
                        if (size == 1) {
                            fn = gen_helper_neon_qrdmlah_s16;
                        } else {
                            fn = gen_helper_neon_qrdmlah_s32;
                        }
                    } else {
                        if (size == 1) {
                            fn = gen_helper_neon_qrdmlsh_s16;
                        } else {
                            fn = gen_helper_neon_qrdmlsh_s32;
                        }
                    }

                    tmp2 = neon_get_scalar(size, rm);
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
                        tmp = neon_load_reg(rn, pass);
                        tmp3 = neon_load_reg(rd, pass);
                        fn(tmp, cpu_env, tmp, tmp2, tmp3);
                        tcg_temp_free_i32(tmp3);
                        neon_store_reg(rd, pass, tmp);
                    }
                    tcg_temp_free_i32(tmp2);
                }
                    break;
                default:
                    g_assert_not_reached();
                }
            }
7211 } else { /* size == 3 */
7212 if (!u) {
7213 /* Extract. */
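                /* A sketch of the arithmetic implemented below (assuming the
                 * usual VEXT byte-extract semantics): the result is
                 * <Vm:Vn> >> (imm * 8), so in the 64-bit case
                 * result = (Vn >> (imm * 8)) | (Vm << (64 - imm * 8)).
                 */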
9ee6e8bb 7214 imm = (insn >> 8) & 0xf;
ad69471c
PB
7215
7216 if (imm > 7 && !q)
7217 return 1;
7218
52579ea1
PM
7219 if (q && ((rd | rn | rm) & 1)) {
7220 return 1;
7221 }
7222
ad69471c
PB
7223 if (imm == 0) {
7224 neon_load_reg64(cpu_V0, rn);
7225 if (q) {
7226 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 7227 }
ad69471c
PB
7228 } else if (imm == 8) {
7229 neon_load_reg64(cpu_V0, rn + 1);
7230 if (q) {
7231 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7232 }
ad69471c 7233 } else if (q) {
a7812ae4 7234 tmp64 = tcg_temp_new_i64();
ad69471c
PB
7235 if (imm < 8) {
7236 neon_load_reg64(cpu_V0, rn);
a7812ae4 7237 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
7238 } else {
7239 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 7240 neon_load_reg64(tmp64, rm);
ad69471c
PB
7241 }
7242 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 7243 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
7244 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7245 if (imm < 8) {
7246 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7247 } else {
ad69471c
PB
7248 neon_load_reg64(cpu_V1, rm + 1);
7249 imm -= 8;
9ee6e8bb 7250 }
ad69471c 7251 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
7252 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7253 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 7254 tcg_temp_free_i64(tmp64);
ad69471c 7255 } else {
a7812ae4 7256 /* BUGFIX: non-quad (64-bit) VEXT case */
ad69471c 7257 neon_load_reg64(cpu_V0, rn);
a7812ae4 7258 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 7259 neon_load_reg64(cpu_V1, rm);
a7812ae4 7260 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
7261 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7262 }
7263 neon_store_reg64(cpu_V0, rd);
7264 if (q) {
7265 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
7266 }
7267 } else if ((insn & (1 << 11)) == 0) {
7268 /* Two register misc. */
7269 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7270 size = (insn >> 18) & 3;
600b828c
PM
7271 /* UNDEF for unknown op values and bad op-size combinations */
7272 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7273 return 1;
7274 }
fe8fcf3d
PM
7275 if (neon_2rm_is_v8_op(op) &&
7276 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7277 return 1;
7278 }
fc2a9b37
PM
7279 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7280 q && ((rm | rd) & 1)) {
7281 return 1;
7282 }
9ee6e8bb 7283 switch (op) {
600b828c 7284 case NEON_2RM_VREV64:
9ee6e8bb 7285 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7286 tmp = neon_load_reg(rm, pass * 2);
7287 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7288 switch (size) {
dd8fbd78
FN
7289 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7290 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7291 case 2: /* no-op */ break;
7292 default: abort();
7293 }
dd8fbd78 7294 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7295 if (size == 2) {
dd8fbd78 7296 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7297 } else {
9ee6e8bb 7298 switch (size) {
dd8fbd78
FN
7299 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7300 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7301 default: abort();
7302 }
dd8fbd78 7303 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7304 }
7305 }
7306 break;
600b828c
PM
7307 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7308 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7309 for (pass = 0; pass < q + 1; pass++) {
7310 tmp = neon_load_reg(rm, pass * 2);
7311 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7312 tmp = neon_load_reg(rm, pass * 2 + 1);
7313 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7314 switch (size) {
7315 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7316 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7317 case 2: tcg_gen_add_i64(CPU_V001); break;
7318 default: abort();
7319 }
600b828c 7320 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7321 /* Accumulate. */
ad69471c
PB
7322 neon_load_reg64(cpu_V1, rd + pass);
7323 gen_neon_addl(size);
9ee6e8bb 7324 }
ad69471c 7325 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7326 }
7327 break;
600b828c 7328 case NEON_2RM_VTRN:
9ee6e8bb 7329 if (size == 2) {
a5a14945 7330 int n;
9ee6e8bb 7331 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7332 tmp = neon_load_reg(rm, n);
7333 tmp2 = neon_load_reg(rd, n + 1);
7334 neon_store_reg(rm, n, tmp2);
7335 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7336 }
7337 } else {
7338 goto elementwise;
7339 }
7340 break;
600b828c 7341 case NEON_2RM_VUZP:
02acedf9 7342 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7343 return 1;
9ee6e8bb
PB
7344 }
7345 break;
600b828c 7346 case NEON_2RM_VZIP:
d68a6f3a 7347 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7348 return 1;
9ee6e8bb
PB
7349 }
7350 break;
600b828c
PM
7351 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7352 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7353 if (rm & 1) {
7354 return 1;
7355 }
f764718d 7356 tmp2 = NULL;
9ee6e8bb 7357 for (pass = 0; pass < 2; pass++) {
ad69471c 7358 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7359 tmp = tcg_temp_new_i32();
600b828c
PM
7360 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7361 tmp, cpu_V0);
ad69471c
PB
7362 if (pass == 0) {
7363 tmp2 = tmp;
7364 } else {
7365 neon_store_reg(rd, 0, tmp2);
7366 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7367 }
9ee6e8bb
PB
7368 }
7369 break;
600b828c 7370 case NEON_2RM_VSHLL:
fc2a9b37 7371 if (q || (rd & 1)) {
9ee6e8bb 7372 return 1;
600b828c 7373 }
ad69471c
PB
7374 tmp = neon_load_reg(rm, 0);
7375 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7376 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7377 if (pass == 1)
7378 tmp = tmp2;
7379 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7380 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7381 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7382 }
7383 break;
600b828c 7384 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
7385 {
7386 TCGv_ptr fpst;
7387 TCGv_i32 ahp;
7388
d614a513 7389 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7390 q || (rm & 1)) {
7391 return 1;
7392 }
7d1b0095
PM
7393 tmp = tcg_temp_new_i32();
7394 tmp2 = tcg_temp_new_i32();
486624fc
AB
7395 fpst = get_fpstatus_ptr(true);
7396 ahp = get_ahp_flag();
60011498 7397 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
486624fc 7398 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498 7399 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
486624fc 7400 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7401 tcg_gen_shli_i32(tmp2, tmp2, 16);
7402 tcg_gen_or_i32(tmp2, tmp2, tmp);
7403 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
486624fc 7404 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498
PB
7405 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7406 neon_store_reg(rd, 0, tmp2);
7d1b0095 7407 tmp2 = tcg_temp_new_i32();
486624fc 7408 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7409 tcg_gen_shli_i32(tmp2, tmp2, 16);
7410 tcg_gen_or_i32(tmp2, tmp2, tmp);
7411 neon_store_reg(rd, 1, tmp2);
7d1b0095 7412 tcg_temp_free_i32(tmp);
486624fc
AB
7413 tcg_temp_free_i32(ahp);
7414 tcg_temp_free_ptr(fpst);
60011498 7415 break;
486624fc 7416 }
600b828c 7417 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
7418 {
7419 TCGv_ptr fpst;
7420 TCGv_i32 ahp;
d614a513 7421 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7422 q || (rd & 1)) {
7423 return 1;
7424 }
486624fc
AB
7425 fpst = get_fpstatus_ptr(true);
7426 ahp = get_ahp_flag();
7d1b0095 7427 tmp3 = tcg_temp_new_i32();
60011498
PB
7428 tmp = neon_load_reg(rm, 0);
7429 tmp2 = neon_load_reg(rm, 1);
7430 tcg_gen_ext16u_i32(tmp3, tmp);
486624fc 7431 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7432 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7433 tcg_gen_shri_i32(tmp3, tmp, 16);
486624fc 7434 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7435 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7436 tcg_temp_free_i32(tmp);
60011498 7437 tcg_gen_ext16u_i32(tmp3, tmp2);
486624fc 7438 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7439 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7440 tcg_gen_shri_i32(tmp3, tmp2, 16);
486624fc 7441 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7442 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7443 tcg_temp_free_i32(tmp2);
7444 tcg_temp_free_i32(tmp3);
486624fc
AB
7445 tcg_temp_free_i32(ahp);
7446 tcg_temp_free_ptr(fpst);
60011498 7447 break;
486624fc 7448 }
9d935509 7449 case NEON_2RM_AESE: case NEON_2RM_AESMC:
962fcbf2 7450 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
9d935509
AB
7451 return 1;
7452 }
1a66ac61
RH
7453 ptr1 = vfp_reg_ptr(true, rd);
7454 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
7455
7456 /* Bit 6 is the lowest opcode bit; it distinguishes between
7457 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7458 */
7459 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7460
7461 if (op == NEON_2RM_AESE) {
1a66ac61 7462 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 7463 } else {
1a66ac61 7464 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 7465 }
1a66ac61
RH
7466 tcg_temp_free_ptr(ptr1);
7467 tcg_temp_free_ptr(ptr2);
9d935509
AB
7468 tcg_temp_free_i32(tmp3);
7469 break;
f1ecb913 7470 case NEON_2RM_SHA1H:
962fcbf2 7471 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
f1ecb913
AB
7472 return 1;
7473 }
1a66ac61
RH
7474 ptr1 = vfp_reg_ptr(true, rd);
7475 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7476
1a66ac61 7477 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 7478
1a66ac61
RH
7479 tcg_temp_free_ptr(ptr1);
7480 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
7481 break;
7482 case NEON_2RM_SHA1SU1:
7483 if ((rm | rd) & 1) {
7484 return 1;
7485 }
7486 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7487 if (q) {
962fcbf2 7488 if (!dc_isar_feature(aa32_sha2, s)) {
f1ecb913
AB
7489 return 1;
7490 }
962fcbf2 7491 } else if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
7492 return 1;
7493 }
1a66ac61
RH
7494 ptr1 = vfp_reg_ptr(true, rd);
7495 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7496 if (q) {
1a66ac61 7497 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 7498 } else {
1a66ac61 7499 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 7500 }
1a66ac61
RH
7501 tcg_temp_free_ptr(ptr1);
7502 tcg_temp_free_ptr(ptr2);
f1ecb913 7503 break;
9ee6e8bb
PB
7504 default:
7505 elementwise:
7506 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7507 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7508 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7509 neon_reg_offset(rm, pass));
f764718d 7510 tmp = NULL;
9ee6e8bb 7511 } else {
dd8fbd78 7512 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7513 }
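                    /* Float ops stage their operand in the legacy cpu_F0s
                     * temporary (loaded just above, written back after this
                     * switch); integer ops work on tmp directly. */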
7514 switch (op) {
600b828c 7515 case NEON_2RM_VREV32:
9ee6e8bb 7516 switch (size) {
dd8fbd78
FN
7517 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7518 case 1: gen_swap_half(tmp); break;
600b828c 7519 default: abort();
9ee6e8bb
PB
7520 }
7521 break;
600b828c 7522 case NEON_2RM_VREV16:
dd8fbd78 7523 gen_rev16(tmp);
9ee6e8bb 7524 break;
600b828c 7525 case NEON_2RM_VCLS:
9ee6e8bb 7526 switch (size) {
dd8fbd78
FN
7527 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7528 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7529 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7530 default: abort();
9ee6e8bb
PB
7531 }
7532 break;
600b828c 7533 case NEON_2RM_VCLZ:
9ee6e8bb 7534 switch (size) {
dd8fbd78
FN
7535 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7536 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7537 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7538 default: abort();
9ee6e8bb
PB
7539 }
7540 break;
600b828c 7541 case NEON_2RM_VCNT:
dd8fbd78 7542 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7543 break;
600b828c 7544 case NEON_2RM_VMVN:
dd8fbd78 7545 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7546 break;
600b828c 7547 case NEON_2RM_VQABS:
9ee6e8bb 7548 switch (size) {
02da0b2d
PM
7549 case 0:
7550 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7551 break;
7552 case 1:
7553 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7554 break;
7555 case 2:
7556 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7557 break;
600b828c 7558 default: abort();
9ee6e8bb
PB
7559 }
7560 break;
600b828c 7561 case NEON_2RM_VQNEG:
9ee6e8bb 7562 switch (size) {
02da0b2d
PM
7563 case 0:
7564 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7565 break;
7566 case 1:
7567 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7568 break;
7569 case 2:
7570 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7571 break;
600b828c 7572 default: abort();
9ee6e8bb
PB
7573 }
7574 break;
600b828c 7575 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7576 tmp2 = tcg_const_i32(0);
9ee6e8bb 7577 switch (size) {
dd8fbd78
FN
7578 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7579 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7580 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7581 default: abort();
9ee6e8bb 7582 }
39d5492a 7583 tcg_temp_free_i32(tmp2);
600b828c 7584 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7585 tcg_gen_not_i32(tmp, tmp);
600b828c 7586 }
9ee6e8bb 7587 break;
600b828c 7588 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7589 tmp2 = tcg_const_i32(0);
9ee6e8bb 7590 switch (size) {
dd8fbd78
FN
7591 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7592 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7593 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7594 default: abort();
9ee6e8bb 7595 }
39d5492a 7596 tcg_temp_free_i32(tmp2);
600b828c 7597 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7598 tcg_gen_not_i32(tmp, tmp);
600b828c 7599 }
9ee6e8bb 7600 break;
600b828c 7601 case NEON_2RM_VCEQ0:
dd8fbd78 7602 tmp2 = tcg_const_i32(0);
9ee6e8bb 7603 switch (size) {
dd8fbd78
FN
7604 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7605 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7606 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7607 default: abort();
9ee6e8bb 7608 }
39d5492a 7609 tcg_temp_free_i32(tmp2);
9ee6e8bb 7610 break;
600b828c 7611 case NEON_2RM_VABS:
9ee6e8bb 7612 switch (size) {
dd8fbd78
FN
7613 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7614 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7615 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7616 default: abort();
9ee6e8bb
PB
7617 }
7618 break;
600b828c 7619 case NEON_2RM_VNEG:
dd8fbd78
FN
7620 tmp2 = tcg_const_i32(0);
7621 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7622 tcg_temp_free_i32(tmp2);
9ee6e8bb 7623 break;
600b828c 7624 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7625 {
7626 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7627 tmp2 = tcg_const_i32(0);
aa47cfdd 7628 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7629 tcg_temp_free_i32(tmp2);
aa47cfdd 7630 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7631 break;
aa47cfdd 7632 }
600b828c 7633 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7634 {
7635 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7636 tmp2 = tcg_const_i32(0);
aa47cfdd 7637 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7638 tcg_temp_free_i32(tmp2);
aa47cfdd 7639 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7640 break;
aa47cfdd 7641 }
600b828c 7642 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7643 {
7644 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7645 tmp2 = tcg_const_i32(0);
aa47cfdd 7646 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7647 tcg_temp_free_i32(tmp2);
aa47cfdd 7648 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7649 break;
aa47cfdd 7650 }
600b828c 7651 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7652 {
7653 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7654 tmp2 = tcg_const_i32(0);
aa47cfdd 7655 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7656 tcg_temp_free_i32(tmp2);
aa47cfdd 7657 tcg_temp_free_ptr(fpstatus);
0e326109 7658 break;
aa47cfdd 7659 }
600b828c 7660 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7661 {
7662 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7663 tmp2 = tcg_const_i32(0);
aa47cfdd 7664 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7665 tcg_temp_free_i32(tmp2);
aa47cfdd 7666 tcg_temp_free_ptr(fpstatus);
0e326109 7667 break;
aa47cfdd 7668 }
600b828c 7669 case NEON_2RM_VABS_F:
4373f3ce 7670 gen_vfp_abs(0);
9ee6e8bb 7671 break;
600b828c 7672 case NEON_2RM_VNEG_F:
4373f3ce 7673 gen_vfp_neg(0);
9ee6e8bb 7674 break;
600b828c 7675 case NEON_2RM_VSWP:
dd8fbd78
FN
7676 tmp2 = neon_load_reg(rd, pass);
7677 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7678 break;
600b828c 7679 case NEON_2RM_VTRN:
dd8fbd78 7680 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7681 switch (size) {
dd8fbd78
FN
7682 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7683 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7684 default: abort();
9ee6e8bb 7685 }
dd8fbd78 7686 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7687 break;
34f7b0a2
WN
7688 case NEON_2RM_VRINTN:
7689 case NEON_2RM_VRINTA:
7690 case NEON_2RM_VRINTM:
7691 case NEON_2RM_VRINTP:
7692 case NEON_2RM_VRINTZ:
7693 {
7694 TCGv_i32 tcg_rmode;
7695 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7696 int rmode;
7697
7698 if (op == NEON_2RM_VRINTZ) {
7699 rmode = FPROUNDING_ZERO;
7700 } else {
7701 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7702 }
7703
7704 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7705 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7706 cpu_env);
7707 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7708 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7709 cpu_env);
7710 tcg_temp_free_ptr(fpstatus);
7711 tcg_temp_free_i32(tcg_rmode);
7712 break;
7713 }
2ce70625
WN
7714 case NEON_2RM_VRINTX:
7715 {
7716 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7717 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7718 tcg_temp_free_ptr(fpstatus);
7719 break;
7720 }
901ad525
WN
7721 case NEON_2RM_VCVTAU:
7722 case NEON_2RM_VCVTAS:
7723 case NEON_2RM_VCVTNU:
7724 case NEON_2RM_VCVTNS:
7725 case NEON_2RM_VCVTPU:
7726 case NEON_2RM_VCVTPS:
7727 case NEON_2RM_VCVTMU:
7728 case NEON_2RM_VCVTMS:
7729 {
7730 bool is_signed = !extract32(insn, 7, 1);
7731 TCGv_ptr fpst = get_fpstatus_ptr(1);
7732 TCGv_i32 tcg_rmode, tcg_shift;
7733 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7734
7735 tcg_shift = tcg_const_i32(0);
7736 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7737 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7738 cpu_env);
7739
7740 if (is_signed) {
7741 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7742 tcg_shift, fpst);
7743 } else {
7744 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7745 tcg_shift, fpst);
7746 }
7747
7748 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7749 cpu_env);
7750 tcg_temp_free_i32(tcg_rmode);
7751 tcg_temp_free_i32(tcg_shift);
7752 tcg_temp_free_ptr(fpst);
7753 break;
7754 }
600b828c 7755 case NEON_2RM_VRECPE:
b6d4443a
AB
7756 {
7757 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7758 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7759 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7760 break;
b6d4443a 7761 }
600b828c 7762 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7763 {
7764 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7765 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7766 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7767 break;
c2fb418e 7768 }
600b828c 7769 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7770 {
7771 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7772 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7773 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7774 break;
b6d4443a 7775 }
600b828c 7776 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7777 {
7778 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7779 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7780 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7781 break;
c2fb418e 7782 }
600b828c 7783 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7784 gen_vfp_sito(0, 1);
9ee6e8bb 7785 break;
600b828c 7786 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7787 gen_vfp_uito(0, 1);
9ee6e8bb 7788 break;
600b828c 7789 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7790 gen_vfp_tosiz(0, 1);
9ee6e8bb 7791 break;
600b828c 7792 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7793 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7794 break;
7795 default:
600b828c
PM
7796 /* Reserved op values were caught by the
7797 * neon_2rm_sizes[] check earlier.
7798 */
7799 abort();
9ee6e8bb 7800 }
600b828c 7801 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7802 tcg_gen_st_f32(cpu_F0s, cpu_env,
7803 neon_reg_offset(rd, pass));
9ee6e8bb 7804 } else {
dd8fbd78 7805 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7806 }
7807 }
7808 break;
7809 }
7810 } else if ((insn & (1 << 10)) == 0) {
7811 /* VTBL, VTBX. */
56907d77
PM
7812 int n = ((insn >> 8) & 3) + 1;
7813 if ((rn + n) > 32) {
7814 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7815 * helper function running off the end of the register file.
7816 */
7817 return 1;
7818 }
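            /* n currently counts list registers; the shift below turns it
             * into the table length in bytes passed to the helper. */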
7819 n <<= 3;
9ee6e8bb 7820 if (insn & (1 << 6)) {
8f8e3aa4 7821 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7822 } else {
7d1b0095 7823 tmp = tcg_temp_new_i32();
8f8e3aa4 7824 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7825 }
8f8e3aa4 7826 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 7827 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 7828 tmp5 = tcg_const_i32(n);
e7c06c4e 7829 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 7830 tcg_temp_free_i32(tmp);
9ee6e8bb 7831 if (insn & (1 << 6)) {
8f8e3aa4 7832 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7833 } else {
7d1b0095 7834 tmp = tcg_temp_new_i32();
8f8e3aa4 7835 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7836 }
8f8e3aa4 7837 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 7838 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 7839 tcg_temp_free_i32(tmp5);
e7c06c4e 7840 tcg_temp_free_ptr(ptr1);
8f8e3aa4 7841 neon_store_reg(rd, 0, tmp2);
3018f259 7842 neon_store_reg(rd, 1, tmp3);
7d1b0095 7843 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7844 } else if ((insn & 0x380) == 0) {
7845 /* VDUP */
32f91fb7
RH
7846 int element;
7847 TCGMemOp size;
7848
133da6aa
JR
7849 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7850 return 1;
7851 }
9ee6e8bb 7852 if (insn & (1 << 16)) {
32f91fb7
RH
7853 size = MO_8;
7854 element = (insn >> 17) & 7;
9ee6e8bb 7855 } else if (insn & (1 << 17)) {
32f91fb7
RH
7856 size = MO_16;
7857 element = (insn >> 18) & 3;
7858 } else {
7859 size = MO_32;
7860 element = (insn >> 19) & 1;
9ee6e8bb 7861 }
32f91fb7
RH
7862 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
7863 neon_element_offset(rm, element, size),
7864 q ? 16 : 8, q ? 16 : 8);
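            /* The gvec dup writes the whole destination (8 bytes for a
             * D register, 16 for a Q register) from the one selected
             * source element in a single vectorised operation. */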
9ee6e8bb
PB
7865 } else {
7866 return 1;
7867 }
7868 }
7869 }
7870 return 0;
7871}
7872
8b7209fa
RH
7873/* Advanced SIMD three registers of the same length extension.
7874 * 31 25 23 22 20 16 12 11 10 9 8 3 0
7875 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7876 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7877 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7878 */
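/* A decode sketch (derived from the checks below, not a normative table):
 * VCMLA matches (insn & 0xfe200f10) == 0xfc200800, with the rotation in
 * bits [24:23] and the size in bit 20; VCADD and V[US]DOT are matched by
 * analogous fixed-bit patterns, with the register fields laid out as
 * drawn above.
 */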
7879static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
7880{
26c470a7
RH
7881 gen_helper_gvec_3 *fn_gvec = NULL;
7882 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
7883 int rd, rn, rm, opr_sz;
7884 int data = 0;
8b7209fa
RH
7885 bool q;
7886
7887 q = extract32(insn, 6, 1);
7888 VFP_DREG_D(rd, insn);
7889 VFP_DREG_N(rn, insn);
7890 VFP_DREG_M(rm, insn);
7891 if ((rd | rn | rm) & q) {
7892 return 1;
7893 }
7894
7895 if ((insn & 0xfe200f10) == 0xfc200800) {
7896 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
7897 int size = extract32(insn, 20, 1);
7898 data = extract32(insn, 23, 2); /* rot */
962fcbf2 7899 if (!dc_isar_feature(aa32_vcma, s)
5763190f 7900 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
7901 return 1;
7902 }
7903 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
7904 } else if ((insn & 0xfea00f10) == 0xfc800800) {
7905 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
7906 int size = extract32(insn, 20, 1);
7907 data = extract32(insn, 24, 1); /* rot */
962fcbf2 7908 if (!dc_isar_feature(aa32_vcma, s)
5763190f 7909 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
7910 return 1;
7911 }
7912 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
7913 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
7914 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
7915 bool u = extract32(insn, 4, 1);
962fcbf2 7916 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
7917 return 1;
7918 }
7919 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
8b7209fa
RH
7920 } else {
7921 return 1;
7922 }
7923
7924 if (s->fp_excp_el) {
7925 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 7926 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8b7209fa
RH
7927 return 0;
7928 }
7929 if (!s->vfp_enabled) {
7930 return 1;
7931 }
7932
7933 opr_sz = (1 + q) * 8;
26c470a7
RH
7934 if (fn_gvec_ptr) {
7935 TCGv_ptr fpst = get_fpstatus_ptr(1);
7936 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7937 vfp_reg_offset(1, rn),
7938 vfp_reg_offset(1, rm), fpst,
7939 opr_sz, opr_sz, data, fn_gvec_ptr);
7940 tcg_temp_free_ptr(fpst);
7941 } else {
7942 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
7943 vfp_reg_offset(1, rn),
7944 vfp_reg_offset(1, rm),
7945 opr_sz, opr_sz, data, fn_gvec);
7946 }
8b7209fa
RH
7947 return 0;
7948}
7949
638808ff
RH
7950/* Advanced SIMD two registers and a scalar extension.
7951 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7952 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7953 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7954 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7955 *
7956 */
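/* As in the three-same-length case, the handlers below pattern-match fixed
 * bits; e.g. VCMLA (indexed) is (insn & 0xff000f10) == 0xfe000800, with the
 * rotation in bits [21:20] and the size in bit 23.
 */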
7957
7958static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7959{
26c470a7
RH
7960 gen_helper_gvec_3 *fn_gvec = NULL;
7961 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
2cc99919 7962 int rd, rn, rm, opr_sz, data;
638808ff
RH
7963 bool q;
7964
7965 q = extract32(insn, 6, 1);
7966 VFP_DREG_D(rd, insn);
7967 VFP_DREG_N(rn, insn);
638808ff
RH
7968 if ((rd | rn) & q) {
7969 return 1;
7970 }
7971
7972 if ((insn & 0xff000f10) == 0xfe000800) {
7973 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
2cc99919
RH
7974 int rot = extract32(insn, 20, 2);
7975 int size = extract32(insn, 23, 1);
7976 int index;
7977
962fcbf2 7978 if (!dc_isar_feature(aa32_vcma, s)) {
638808ff
RH
7979 return 1;
7980 }
2cc99919 7981 if (size == 0) {
5763190f 7982 if (!dc_isar_feature(aa32_fp16_arith, s)) {
2cc99919
RH
7983 return 1;
7984 }
7985 /* For fp16, rm is just Vm, and index is M. */
7986 rm = extract32(insn, 0, 4);
7987 index = extract32(insn, 5, 1);
7988 } else {
7989 /* For fp32, rm is the usual M:Vm, and index is 0. */
7990 VFP_DREG_M(rm, insn);
7991 index = 0;
7992 }
7993 data = (index << 2) | rot;
7994 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
7995 : gen_helper_gvec_fcmlah_idx);
26c470a7
RH
7996 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
7997 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
7998 int u = extract32(insn, 4, 1);
962fcbf2 7999 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
8000 return 1;
8001 }
8002 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
8003 /* rm is just Vm, and index is M. */
8004 data = extract32(insn, 5, 1); /* index */
8005 rm = extract32(insn, 0, 4);
638808ff
RH
8006 } else {
8007 return 1;
8008 }
8009
8010 if (s->fp_excp_el) {
8011 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 8012 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
638808ff
RH
8013 return 0;
8014 }
8015 if (!s->vfp_enabled) {
8016 return 1;
8017 }
8018
8019 opr_sz = (1 + q) * 8;
26c470a7
RH
8020 if (fn_gvec_ptr) {
8021 TCGv_ptr fpst = get_fpstatus_ptr(1);
8022 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
8023 vfp_reg_offset(1, rn),
8024 vfp_reg_offset(1, rm), fpst,
8025 opr_sz, opr_sz, data, fn_gvec_ptr);
8026 tcg_temp_free_ptr(fpst);
8027 } else {
8028 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
8029 vfp_reg_offset(1, rn),
8030 vfp_reg_offset(1, rm),
8031 opr_sz, opr_sz, data, fn_gvec);
8032 }
638808ff
RH
8033 return 0;
8034}
8035
7dcc1f89 8036static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 8037{
4b6a83fb
PM
8038 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
8039 const ARMCPRegInfo *ri;
9ee6e8bb
PB
8040
8041 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
8042
8043 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 8044 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
8045 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
8046 return 1;
8047 }
d614a513 8048 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 8049 return disas_iwmmxt_insn(s, insn);
d614a513 8050 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 8051 return disas_dsp_insn(s, insn);
c0f4af17
PM
8052 }
8053 return 1;
4b6a83fb
PM
8054 }
8055
8056 /* Otherwise treat as a generic register access */
8057 is64 = (insn & (1 << 25)) == 0;
8058 if (!is64 && ((insn & (1 << 4)) == 0)) {
8059 /* cdp */
8060 return 1;
8061 }
8062
8063 crm = insn & 0xf;
8064 if (is64) {
8065 crn = 0;
8066 opc1 = (insn >> 4) & 0xf;
8067 opc2 = 0;
8068 rt2 = (insn >> 16) & 0xf;
8069 } else {
8070 crn = (insn >> 16) & 0xf;
8071 opc1 = (insn >> 21) & 7;
8072 opc2 = (insn >> 5) & 7;
8073 rt2 = 0;
8074 }
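    /* Note: is64 distinguishes the two-register (MCRR/MRRC-style) form,
     * which carries the second transfer register in place of crn/opc2,
     * from the single-register (MCR/MRC-style) form decoded above. */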
8075 isread = (insn >> 20) & 1;
8076 rt = (insn >> 12) & 0xf;
8077
60322b39 8078 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 8079 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
8080 if (ri) {
8081 /* Check access permissions */
dcbff19b 8082 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
8083 return 1;
8084 }
8085
c0f4af17 8086 if (ri->accessfn ||
d614a513 8087 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
8088 /* Emit code to perform further access permissions checks at
8089 * runtime; this may result in an exception.
c0f4af17
PM
8090 * Note that on XScale all cp0..cp13 registers do an access check
8091 * call in order to handle c15_cpar.
f59df3f2
PM
8092 */
8093 TCGv_ptr tmpptr;
3f208fd7 8094 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
8095 uint32_t syndrome;
8096
8097 /* Note that since we are an implementation which takes an
8098 * exception on a trapped conditional instruction only if the
8099 * instruction passes its condition code check, we can take
8100 * advantage of the clause in the ARM ARM that allows us to set
8101 * the COND field in the instruction to 0xE in all cases.
8102 * We could fish the actual condition out of the insn (ARM)
8103 * or the condexec bits (Thumb) but it isn't necessary.
8104 */
8105 switch (cpnum) {
8106 case 14:
8107 if (is64) {
8108 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8109 isread, false);
8bcbf37c
PM
8110 } else {
8111 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8112 rt, isread, false);
8bcbf37c
PM
8113 }
8114 break;
8115 case 15:
8116 if (is64) {
8117 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8118 isread, false);
8bcbf37c
PM
8119 } else {
8120 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8121 rt, isread, false);
8bcbf37c
PM
8122 }
8123 break;
8124 default:
8125 /* ARMv8 defines that only coprocessors 14 and 15 exist,
8126 * so this can only happen if this is an ARMv7 or earlier CPU,
8127 * in which case the syndrome information won't actually be
8128 * guest visible.
8129 */
d614a513 8130 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
8131 syndrome = syn_uncategorized();
8132 break;
8133 }
8134
43bfa4a1 8135 gen_set_condexec(s);
3977ee5d 8136 gen_set_pc_im(s, s->pc - 4);
f59df3f2 8137 tmpptr = tcg_const_ptr(ri);
8bcbf37c 8138 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
8139 tcg_isread = tcg_const_i32(isread);
8140 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
8141 tcg_isread);
f59df3f2 8142 tcg_temp_free_ptr(tmpptr);
8bcbf37c 8143 tcg_temp_free_i32(tcg_syn);
3f208fd7 8144 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
8145 }
8146
4b6a83fb
PM
8147 /* Handle special cases first */
8148 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
8149 case ARM_CP_NOP:
8150 return 0;
8151 case ARM_CP_WFI:
8152 if (isread) {
8153 return 1;
8154 }
eaed129d 8155 gen_set_pc_im(s, s->pc);
dcba3a8d 8156 s->base.is_jmp = DISAS_WFI;
2bee5105 8157 return 0;
4b6a83fb
PM
8158 default:
8159 break;
8160 }
8161
c5a49c63 8162 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8163 gen_io_start();
8164 }
8165
4b6a83fb
PM
8166 if (isread) {
8167 /* Read */
8168 if (is64) {
8169 TCGv_i64 tmp64;
8170 TCGv_i32 tmp;
8171 if (ri->type & ARM_CP_CONST) {
8172 tmp64 = tcg_const_i64(ri->resetvalue);
8173 } else if (ri->readfn) {
8174 TCGv_ptr tmpptr;
4b6a83fb
PM
8175 tmp64 = tcg_temp_new_i64();
8176 tmpptr = tcg_const_ptr(ri);
8177 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8178 tcg_temp_free_ptr(tmpptr);
8179 } else {
8180 tmp64 = tcg_temp_new_i64();
8181 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8182 }
8183 tmp = tcg_temp_new_i32();
ecc7b3aa 8184 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
8185 store_reg(s, rt, tmp);
8186 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 8187 tmp = tcg_temp_new_i32();
ecc7b3aa 8188 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 8189 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
8190 store_reg(s, rt2, tmp);
8191 } else {
39d5492a 8192 TCGv_i32 tmp;
4b6a83fb
PM
8193 if (ri->type & ARM_CP_CONST) {
8194 tmp = tcg_const_i32(ri->resetvalue);
8195 } else if (ri->readfn) {
8196 TCGv_ptr tmpptr;
4b6a83fb
PM
8197 tmp = tcg_temp_new_i32();
8198 tmpptr = tcg_const_ptr(ri);
8199 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8200 tcg_temp_free_ptr(tmpptr);
8201 } else {
8202 tmp = load_cpu_offset(ri->fieldoffset);
8203 }
8204 if (rt == 15) {
8205 /* Destination register of r15 for 32 bit loads sets
8206 * the condition codes from the high 4 bits of the value
8207 */
8208 gen_set_nzcv(tmp);
8209 tcg_temp_free_i32(tmp);
8210 } else {
8211 store_reg(s, rt, tmp);
8212 }
8213 }
8214 } else {
8215 /* Write */
8216 if (ri->type & ARM_CP_CONST) {
8217 /* If not forbidden by access permissions, treat as WI */
8218 return 0;
8219 }
8220
8221 if (is64) {
39d5492a 8222 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
8223 TCGv_i64 tmp64 = tcg_temp_new_i64();
8224 tmplo = load_reg(s, rt);
8225 tmphi = load_reg(s, rt2);
8226 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8227 tcg_temp_free_i32(tmplo);
8228 tcg_temp_free_i32(tmphi);
8229 if (ri->writefn) {
8230 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
8231 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8232 tcg_temp_free_ptr(tmpptr);
8233 } else {
8234 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8235 }
8236 tcg_temp_free_i64(tmp64);
8237 } else {
8238 if (ri->writefn) {
39d5492a 8239 TCGv_i32 tmp;
4b6a83fb 8240 TCGv_ptr tmpptr;
4b6a83fb
PM
8241 tmp = load_reg(s, rt);
8242 tmpptr = tcg_const_ptr(ri);
8243 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8244 tcg_temp_free_ptr(tmpptr);
8245 tcg_temp_free_i32(tmp);
8246 } else {
39d5492a 8247 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
8248 store_cpu_offset(tmp, ri->fieldoffset);
8249 }
8250 }
2452731c
PM
8251 }
8252
c5a49c63 8253 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8254 /* I/O operations must end the TB here (whether read or write) */
8255 gen_io_end();
8256 gen_lookup_tb(s);
8257 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
8258 /* We default to ending the TB on a coprocessor register write,
8259 * but allow this to be suppressed by the register definition
8260 * (usually only necessary to work around guest bugs).
8261 */
2452731c 8262 gen_lookup_tb(s);
4b6a83fb 8263 }
2452731c 8264
4b6a83fb
PM
8265 return 0;
8266 }
8267
626187d8
PM
8268 /* Unknown register; this might be a guest error or a QEMU
8269 * unimplemented feature.
8270 */
8271 if (is64) {
8272 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8273 "64 bit system register cp:%d opc1: %d crm:%d "
8274 "(%s)\n",
8275 isread ? "read" : "write", cpnum, opc1, crm,
8276 s->ns ? "non-secure" : "secure");
626187d8
PM
8277 } else {
8278 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8279 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8280 "(%s)\n",
8281 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8282 s->ns ? "non-secure" : "secure");
626187d8
PM
8283 }
8284
4a9a539f 8285 return 1;
9ee6e8bb
PB
8286}
8287
5e3f878a
PB
8288
8289/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 8290static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 8291{
39d5492a 8292 TCGv_i32 tmp;
7d1b0095 8293 tmp = tcg_temp_new_i32();
ecc7b3aa 8294 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 8295 store_reg(s, rlow, tmp);
7d1b0095 8296 tmp = tcg_temp_new_i32();
5e3f878a 8297 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 8298 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
8299 store_reg(s, rhigh, tmp);
8300}
8301
8302/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 8303static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 8304{
a7812ae4 8305 TCGv_i64 tmp;
39d5492a 8306 TCGv_i32 tmp2;
5e3f878a 8307
36aa55dc 8308 /* Load value and extend to 64 bits. */
a7812ae4 8309 tmp = tcg_temp_new_i64();
5e3f878a
PB
8310 tmp2 = load_reg(s, rlow);
8311 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 8312 tcg_temp_free_i32(tmp2);
5e3f878a 8313 tcg_gen_add_i64(val, val, tmp);
b75263d6 8314 tcg_temp_free_i64(tmp);
5e3f878a
PB
8315}
8316
8317/* load and add a 64-bit value from a register pair. */
a7812ae4 8318static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 8319{
a7812ae4 8320 TCGv_i64 tmp;
39d5492a
PM
8321 TCGv_i32 tmpl;
8322 TCGv_i32 tmph;
5e3f878a
PB
8323
8324 /* Load 64-bit value rd:rn. */
36aa55dc
PB
8325 tmpl = load_reg(s, rlow);
8326 tmph = load_reg(s, rhigh);
a7812ae4 8327 tmp = tcg_temp_new_i64();
36aa55dc 8328 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
8329 tcg_temp_free_i32(tmpl);
8330 tcg_temp_free_i32(tmph);
5e3f878a 8331 tcg_gen_add_i64(val, val, tmp);
b75263d6 8332 tcg_temp_free_i64(tmp);
5e3f878a
PB
8333}
8334
c9f10124 8335/* Set N and Z flags from hi|lo. */
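/* (In this translator's flag representation the Z flag reads as set when
 * cpu_ZF is zero and only bit 31 of cpu_NF is significant, so OR-ing the
 * two halves into cpu_ZF and copying the high word into cpu_NF suffices.) */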
39d5492a 8336static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 8337{
c9f10124
RH
8338 tcg_gen_mov_i32(cpu_NF, hi);
8339 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
8340}
8341
426f5abc
PB
8342/* Load/Store exclusive instructions are implemented by remembering
8343 the value/address loaded, and seeing if these are the same
354161b3 8344 when the store is performed. This should be sufficient to implement
426f5abc 8345 the architecturally mandated semantics, and avoids having to monitor
354161b3
EC
8346 regular stores. The compare vs the remembered value is done during
8347 the cmpxchg operation, but we must compare the addresses manually. */
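/* In outline (a sketch of the scheme described above):
 *   LDREX: exclusive_addr = addr; exclusive_val = [addr]; Rt = value;
 *   STREX: if (addr == exclusive_addr
 *              && cmpxchg([addr], exclusive_val, Rt) succeeds)
 *              Rd = 0; else Rd = 1;
 * with exclusive_addr reset to -1 (never a valid address) afterwards and
 * by CLREX.
 */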
426f5abc 8348static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 8349 TCGv_i32 addr, int size)
426f5abc 8350{
94ee24e7 8351 TCGv_i32 tmp = tcg_temp_new_i32();
354161b3 8352 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 8353
50225ad0
PM
8354 s->is_ldex = true;
8355
426f5abc 8356 if (size == 3) {
39d5492a 8357 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 8358 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 8359
3448d47b
PM
8360 /* For AArch32, architecturally the 32-bit word at the lowest
8361 * address is always Rt and the one at addr+4 is Rt2, even if
8362 * the CPU is big-endian. That means we don't want to do a
8363 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8364 * for an architecturally 64-bit access, but instead do a
8365 * 64-bit access using MO_BE if appropriate and then split
8366 * the two halves.
8367 * This only makes a difference for BE32 user-mode, where
8368 * frob64() must not flip the two halves of the 64-bit data
8369 * but this code must treat BE32 user-mode like BE32 system.
8370 */
8371 TCGv taddr = gen_aa32_addr(s, addr, opc);
8372
8373 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8374 tcg_temp_free(taddr);
354161b3 8375 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3448d47b
PM
8376 if (s->be_data == MO_BE) {
8377 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8378 } else {
8379 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8380 }
354161b3
EC
8381 tcg_temp_free_i64(t64);
8382
8383 store_reg(s, rt2, tmp2);
03d05e2d 8384 } else {
354161b3 8385 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 8386 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 8387 }
03d05e2d
PM
8388
8389 store_reg(s, rt, tmp);
8390 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
8391}
8392
8393static void gen_clrex(DisasContext *s)
8394{
03d05e2d 8395 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
8396}
8397
426f5abc 8398static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 8399 TCGv_i32 addr, int size)
426f5abc 8400{
354161b3
EC
8401 TCGv_i32 t0, t1, t2;
8402 TCGv_i64 extaddr;
8403 TCGv taddr;
42a268c2
RH
8404 TCGLabel *done_label;
8405 TCGLabel *fail_label;
354161b3 8406 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
8407
8408 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
8409 [addr] = {Rt};
8410 {Rd} = 0;
8411 } else {
8412 {Rd} = 1;
8413 } */
8414 fail_label = gen_new_label();
8415 done_label = gen_new_label();
03d05e2d
PM
8416 extaddr = tcg_temp_new_i64();
8417 tcg_gen_extu_i32_i64(extaddr, addr);
8418 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
8419 tcg_temp_free_i64(extaddr);
8420
354161b3
EC
8421 taddr = gen_aa32_addr(s, addr, opc);
8422 t0 = tcg_temp_new_i32();
8423 t1 = load_reg(s, rt);
426f5abc 8424 if (size == 3) {
354161b3
EC
8425 TCGv_i64 o64 = tcg_temp_new_i64();
8426 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 8427
354161b3 8428 t2 = load_reg(s, rt2);
3448d47b
PM
8429 /* For AArch32, architecturally the 32-bit word at the lowest
8430 * address is always Rt and the one at addr+4 is Rt2, even if
8431 * the CPU is big-endian. Since we're going to treat this as a
8432 * single 64-bit BE store, we need to put the two halves in the
8433 * opposite order for BE to LE, so that they end up in the right
8434 * places.
8435 * We don't want gen_aa32_frob64() because that does the wrong
8436 * thing for BE32 usermode.
8437 */
8438 if (s->be_data == MO_BE) {
8439 tcg_gen_concat_i32_i64(n64, t2, t1);
8440 } else {
8441 tcg_gen_concat_i32_i64(n64, t1, t2);
8442 }
354161b3 8443 tcg_temp_free_i32(t2);
03d05e2d 8444
354161b3
EC
8445 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
8446 get_mem_index(s), opc);
8447 tcg_temp_free_i64(n64);
8448
354161b3
EC
8449 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
8450 tcg_gen_extrl_i64_i32(t0, o64);
8451
8452 tcg_temp_free_i64(o64);
8453 } else {
8454 t2 = tcg_temp_new_i32();
8455 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
8456 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
8457 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
8458 tcg_temp_free_i32(t2);
426f5abc 8459 }
354161b3
EC
8460 tcg_temp_free_i32(t1);
8461 tcg_temp_free(taddr);
8462 tcg_gen_mov_i32(cpu_R[rd], t0);
8463 tcg_temp_free_i32(t0);
426f5abc 8464 tcg_gen_br(done_label);
354161b3 8465
426f5abc
PB
8466 gen_set_label(fail_label);
8467 tcg_gen_movi_i32(cpu_R[rd], 1);
8468 gen_set_label(done_label);
03d05e2d 8469 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 8470}
426f5abc 8471
81465888
PM
8472/* gen_srs:
8473 * @env: CPUARMState
8474 * @s: DisasContext
8475 * @mode: mode field from insn (which stack to store to)
8476 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
8477 * @writeback: true if writeback bit set
8478 *
8479 * Generate code for the SRS (Store Return State) insn.
8480 */
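/* A summary of the offsets applied below (read off the two switches):
 *   amode: 0 (DA)  1 (IA)  2 (DB)  3 (IB)
 *   store:   -4       0      -8      +4
 *   wback:   -8      +4      -4       0
 */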
8481static void gen_srs(DisasContext *s,
8482 uint32_t mode, uint32_t amode, bool writeback)
8483{
8484 int32_t offset;
cbc0326b
PM
8485 TCGv_i32 addr, tmp;
8486 bool undef = false;
8487
8488 /* SRS is:
8489 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 8490 * and specified mode is monitor mode
cbc0326b
PM
8491 * - UNDEFINED in Hyp mode
8492 * - UNPREDICTABLE in User or System mode
8493 * - UNPREDICTABLE if the specified mode is:
8494 * -- not implemented
8495 * -- not a valid mode number
8496 * -- a mode that's at a higher exception level
8497 * -- Monitor, if we are Non-secure
f01377f5 8498 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 8499 */
ba63cf47 8500 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
8501 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
8502 return;
8503 }
8504
8505 if (s->current_el == 0 || s->current_el == 2) {
8506 undef = true;
8507 }
8508
8509 switch (mode) {
8510 case ARM_CPU_MODE_USR:
8511 case ARM_CPU_MODE_FIQ:
8512 case ARM_CPU_MODE_IRQ:
8513 case ARM_CPU_MODE_SVC:
8514 case ARM_CPU_MODE_ABT:
8515 case ARM_CPU_MODE_UND:
8516 case ARM_CPU_MODE_SYS:
8517 break;
8518 case ARM_CPU_MODE_HYP:
8519 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
8520 undef = true;
8521 }
8522 break;
8523 case ARM_CPU_MODE_MON:
8524 /* No need to check specifically for "are we non-secure" because
8525 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8526 * so if this isn't EL3 then we must be non-secure.
8527 */
8528 if (s->current_el != 3) {
8529 undef = true;
8530 }
8531 break;
8532 default:
8533 undef = true;
8534 }
8535
8536 if (undef) {
8537 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
8538 default_exception_el(s));
8539 return;
8540 }
8541
8542 addr = tcg_temp_new_i32();
8543 tmp = tcg_const_i32(mode);
f01377f5
PM
8544 /* get_r13_banked() will raise an exception if called from System mode */
8545 gen_set_condexec(s);
8546 gen_set_pc_im(s, s->pc - 4);
81465888
PM
8547 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8548 tcg_temp_free_i32(tmp);
8549 switch (amode) {
8550 case 0: /* DA */
8551 offset = -4;
8552 break;
8553 case 1: /* IA */
8554 offset = 0;
8555 break;
8556 case 2: /* DB */
8557 offset = -8;
8558 break;
8559 case 3: /* IB */
8560 offset = 4;
8561 break;
8562 default:
8563 abort();
8564 }
8565 tcg_gen_addi_i32(addr, addr, offset);
8566 tmp = load_reg(s, 14);
12dcc321 8567 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8568 tcg_temp_free_i32(tmp);
81465888
PM
8569 tmp = load_cpu_field(spsr);
8570 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 8571 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8572 tcg_temp_free_i32(tmp);
81465888
PM
8573 if (writeback) {
8574 switch (amode) {
8575 case 0:
8576 offset = -8;
8577 break;
8578 case 1:
8579 offset = 4;
8580 break;
8581 case 2:
8582 offset = -4;
8583 break;
8584 case 3:
8585 offset = 0;
8586 break;
8587 default:
8588 abort();
8589 }
8590 tcg_gen_addi_i32(addr, addr, offset);
8591 tmp = tcg_const_i32(mode);
8592 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8593 tcg_temp_free_i32(tmp);
8594 }
8595 tcg_temp_free_i32(addr);
dcba3a8d 8596 s->base.is_jmp = DISAS_UPDATE;
81465888
PM
8597}
8598
c2d9644e
RK
8599/* Generate a label used for skipping this instruction */
8600static void arm_gen_condlabel(DisasContext *s)
8601{
8602 if (!s->condjmp) {
8603 s->condlabel = gen_new_label();
8604 s->condjmp = 1;
8605 }
8606}
8607
8608/* Skip this instruction if the ARM condition is false */
8609static void arm_skip_unless(DisasContext *s, uint32_t cond)
8610{
8611 arm_gen_condlabel(s);
8612 arm_gen_test_cc(cond ^ 1, s->condlabel);
8613}
8614
f4df2210 8615static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8616{
f4df2210 8617 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8618 TCGv_i32 tmp;
8619 TCGv_i32 tmp2;
8620 TCGv_i32 tmp3;
8621 TCGv_i32 addr;
a7812ae4 8622 TCGv_i64 tmp64;
9ee6e8bb 8623
e13886e3
PM
8624 /* M variants do not implement ARM mode; this must raise the INVSTATE
8625 * UsageFault exception.
8626 */
b53d8923 8627 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8628 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8629 default_exception_el(s));
8630 return;
b53d8923 8631 }
9ee6e8bb
PB
8632 cond = insn >> 28;
8633 if (cond == 0xf) {
be5e7a76
DES
8634 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8635 * choose to UNDEF. In ARMv5 and above the space is used
8636 * for miscellaneous unconditional instructions.
8637 */
8638 ARCH(5);
8639
9ee6e8bb
PB
8640 /* Unconditional instructions. */
8641 if (((insn >> 25) & 7) == 1) {
8642 /* NEON Data processing. */
d614a513 8643 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8644 goto illegal_op;
d614a513 8645 }
9ee6e8bb 8646
7dcc1f89 8647 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8648 goto illegal_op;
7dcc1f89 8649 }
9ee6e8bb
PB
8650 return;
8651 }
8652 if ((insn & 0x0f100000) == 0x04000000) {
8653 /* NEON load/store. */
d614a513 8654 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8655 goto illegal_op;
d614a513 8656 }
9ee6e8bb 8657
7dcc1f89 8658 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8659 goto illegal_op;
7dcc1f89 8660 }
9ee6e8bb
PB
8661 return;
8662 }
6a57f3eb
WN
8663 if ((insn & 0x0f000e10) == 0x0e000a00) {
8664 /* VFP. */
7dcc1f89 8665 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8666 goto illegal_op;
8667 }
8668 return;
8669 }
3d185e5d
PM
8670 if (((insn & 0x0f30f000) == 0x0510f000) ||
8671 ((insn & 0x0f30f010) == 0x0710f000)) {
8672 if ((insn & (1 << 22)) == 0) {
8673 /* PLDW; v7MP */
d614a513 8674 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8675 goto illegal_op;
8676 }
8677 }
8678 /* Otherwise PLD; v5TE+ */
be5e7a76 8679 ARCH(5TE);
3d185e5d
PM
8680 return;
8681 }
8682 if (((insn & 0x0f70f000) == 0x0450f000) ||
8683 ((insn & 0x0f70f010) == 0x0650f000)) {
8684 ARCH(7);
8685 return; /* PLI; V7 */
8686 }
8687 if (((insn & 0x0f700000) == 0x04100000) ||
8688 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8689 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8690 goto illegal_op;
8691 }
8692 return; /* v7MP: Unallocated memory hint: must NOP */
8693 }
8694
8695 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8696 ARCH(6);
8697 /* setend */
9886ecdf
PB
8698 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8699 gen_helper_setend(cpu_env);
dcba3a8d 8700 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8701 }
8702 return;
8703 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8704 switch ((insn >> 4) & 0xf) {
8705 case 1: /* clrex */
8706 ARCH(6K);
426f5abc 8707 gen_clrex(s);
9ee6e8bb
PB
8708 return;
8709 case 4: /* dsb */
8710 case 5: /* dmb */
9ee6e8bb 8711 ARCH(7);
61e4c432 8712 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8713 return;
6df99dec
SS
8714 case 6: /* isb */
8715 /* We need to break the TB after this insn to execute
8716 * self-modifying code correctly and also to take
8717 * any pending interrupts immediately.
8718 */
0b609cc1 8719 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 8720 return;
9ee6e8bb
PB
8721 default:
8722 goto illegal_op;
8723 }
8724 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8725 /* srs */
81465888
PM
8726 ARCH(6);
8727 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8728 return;
ea825eee 8729 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8730 /* rfe */
c67b6b71 8731 int32_t offset;
9ee6e8bb
PB
8732 if (IS_USER(s))
8733 goto illegal_op;
8734 ARCH(6);
8735 rn = (insn >> 16) & 0xf;
b0109805 8736 addr = load_reg(s, rn);
9ee6e8bb
PB
8737 i = (insn >> 23) & 3;
8738 switch (i) {
b0109805 8739 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8740 case 1: offset = 0; break; /* IA */
8741 case 2: offset = -8; break; /* DB */
b0109805 8742 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8743 default: abort();
8744 }
8745 if (offset)
b0109805
PB
8746 tcg_gen_addi_i32(addr, addr, offset);
8747 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8748 tmp = tcg_temp_new_i32();
12dcc321 8749 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8750 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8751 tmp2 = tcg_temp_new_i32();
12dcc321 8752 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8753 if (insn & (1 << 21)) {
8754 /* Base writeback. */
8755 switch (i) {
b0109805 8756 case 0: offset = -8; break;
c67b6b71
FN
8757 case 1: offset = 4; break;
8758 case 2: offset = -4; break;
b0109805 8759 case 3: offset = 0; break;
9ee6e8bb
PB
8760 default: abort();
8761 }
8762 if (offset)
b0109805
PB
8763 tcg_gen_addi_i32(addr, addr, offset);
8764 store_reg(s, rn, addr);
8765 } else {
7d1b0095 8766 tcg_temp_free_i32(addr);
9ee6e8bb 8767 }
b0109805 8768 gen_rfe(s, tmp, tmp2);
c67b6b71 8769 return;
9ee6e8bb
PB
8770 } else if ((insn & 0x0e000000) == 0x0a000000) {
8771 /* branch link and change to thumb (blx <offset>) */
8772 int32_t offset;
8773
8774 val = (uint32_t)s->pc;
7d1b0095 8775 tmp = tcg_temp_new_i32();
d9ba4830
PB
8776 tcg_gen_movi_i32(tmp, val);
8777 store_reg(s, 14, tmp);
9ee6e8bb
PB
8778 /* Sign-extend the 24-bit offset */
8779 offset = (((int32_t)insn) << 8) >> 8;
8780 /* offset * 4 + bit24 * 2 + (thumb bit) */
8781 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8782 /* pipeline offset */
8783 val += 4;
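            /* Worked example (a sketch): for insn 0xfa000000 (imm24 == 0,
             * H == 0) this computes val = (s->pc + 4) | 1, i.e. the usual
             * ARM PC-plus-8 relative base with bit 0 set to enter Thumb. */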
be5e7a76 8784 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8785 gen_bx_im(s, val);
9ee6e8bb
PB
8786 return;
8787 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8788 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8789 /* iWMMXt register transfer. */
c0f4af17 8790 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8791 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8792 return;
c0f4af17
PM
8793 }
8794 }
9ee6e8bb 8795 }
8b7209fa
RH
8796 } else if ((insn & 0x0e000a00) == 0x0c000800
8797 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8798 if (disas_neon_insn_3same_ext(s, insn)) {
8799 goto illegal_op;
8800 }
8801 return;
638808ff
RH
8802 } else if ((insn & 0x0f000a00) == 0x0e000800
8803 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8804 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
8805 goto illegal_op;
8806 }
8807 return;
9ee6e8bb
PB
8808 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8809 /* Coprocessor double register transfer. */
be5e7a76 8810 ARCH(5TE);
9ee6e8bb
PB
8811 } else if ((insn & 0x0f000010) == 0x0e000010) {
8812 /* Additional coprocessor register transfer. */
7997d92f 8813 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8814 uint32_t mask;
8815 uint32_t val;
8816 /* cps (privileged) */
8817 if (IS_USER(s))
8818 return;
8819 mask = val = 0;
8820 if (insn & (1 << 19)) {
8821 if (insn & (1 << 8))
8822 mask |= CPSR_A;
8823 if (insn & (1 << 7))
8824 mask |= CPSR_I;
8825 if (insn & (1 << 6))
8826 mask |= CPSR_F;
8827 if (insn & (1 << 18))
8828 val |= mask;
8829 }
7997d92f 8830 if (insn & (1 << 17)) {
9ee6e8bb
PB
8831 mask |= CPSR_M;
8832 val |= (insn & 0x1f);
8833 }
8834 if (mask) {
2fbac54b 8835 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8836 }
8837 return;
8838 }
8839 goto illegal_op;
8840 }
8841 if (cond != 0xe) {
8842 /* if not always execute, we generate a conditional jump to
8843 next instruction */
c2d9644e 8844 arm_skip_unless(s, cond);
9ee6e8bb
PB
8845 }
8846 if ((insn & 0x0f900000) == 0x03000000) {
8847 if ((insn & (1 << 21)) == 0) {
8848 ARCH(6T2);
8849 rd = (insn >> 12) & 0xf;
8850 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8851 if ((insn & (1 << 22)) == 0) {
8852 /* MOVW */
7d1b0095 8853 tmp = tcg_temp_new_i32();
5e3f878a 8854 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8855 } else {
8856 /* MOVT */
5e3f878a 8857 tmp = load_reg(s, rd);
86831435 8858 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8859 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8860 }
5e3f878a 8861 store_reg(s, rd, tmp);
9ee6e8bb
PB
8862 } else {
8863 if (((insn >> 12) & 0xf) != 0xf)
8864 goto illegal_op;
8865 if (((insn >> 16) & 0xf) == 0) {
8866 gen_nop_hint(s, insn & 0xff);
8867 } else {
8868 /* CPSR = immediate */
8869 val = insn & 0xff;
8870 shift = ((insn >> 8) & 0xf) * 2;
8871 if (shift)
8872 val = (val >> shift) | (val << (32 - shift));
                i = ((insn & (1 << 22)) != 0);
                if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
                                   i, val)) {
                    goto illegal_op;
                }
            }
        }
    } else if ((insn & 0x0f900000) == 0x01000000
               && (insn & 0x00000090) != 0x00000090) {
        /* miscellaneous instructions */
        op1 = (insn >> 21) & 3;
        sh = (insn >> 4) & 0xf;
        rm = insn & 0xf;
        switch (sh) {
        case 0x0: /* MSR, MRS */
            if (insn & (1 << 9)) {
                /* MSR (banked) and MRS (banked) */
                int sysm = extract32(insn, 16, 4) |
                    (extract32(insn, 8, 1) << 4);
                int r = extract32(insn, 22, 1);

                if (op1 & 1) {
                    /* MSR (banked) */
                    gen_msr_banked(s, r, sysm, rm);
                } else {
                    /* MRS (banked) */
                    int rd = extract32(insn, 12, 4);

                    gen_mrs_banked(s, r, sysm, rd);
                }
                break;
            }

            /* MSR, MRS (for PSRs) */
            if (op1 & 1) {
                /* PSR = reg */
                tmp = load_reg(s, rm);
                i = ((op1 & 2) != 0);
                if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
                    goto illegal_op;
            } else {
                /* reg = PSR */
                rd = (insn >> 12) & 0xf;
                if (op1 & 2) {
                    if (IS_USER(s))
                        goto illegal_op;
                    tmp = load_cpu_field(spsr);
                } else {
                    tmp = tcg_temp_new_i32();
                    gen_helper_cpsr_read(tmp, cpu_env);
                }
                store_reg(s, rd, tmp);
            }
            break;
        case 0x1:
            if (op1 == 1) {
                /* branch/exchange thumb (bx).  */
                ARCH(4T);
                tmp = load_reg(s, rm);
                gen_bx(s, tmp);
            } else if (op1 == 3) {
                /* clz */
                ARCH(5);
                rd = (insn >> 12) & 0xf;
                tmp = load_reg(s, rm);
                tcg_gen_clzi_i32(tmp, tmp, 32);
                store_reg(s, rd, tmp);
            } else {
                goto illegal_op;
            }
            break;
        case 0x2:
            if (op1 == 1) {
                ARCH(5J); /* bxj */
                /* Trivial implementation equivalent to bx.  */
                tmp = load_reg(s, rm);
                gen_bx(s, tmp);
            } else {
                goto illegal_op;
            }
            break;
        case 0x3:
            if (op1 != 1)
                goto illegal_op;

            ARCH(5);
            /* branch link/exchange thumb (blx) */
            tmp = load_reg(s, rm);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        case 0x4:
        {
            /* crc32/crc32c */
            uint32_t c = extract32(insn, 8, 4);

            /* Check this CPU supports ARMv8 CRC instructions.
             * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
             * Bits 8, 10 and 11 should be zero.
             */
            if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
                goto illegal_op;
            }

            rn = extract32(insn, 16, 4);
            rd = extract32(insn, 12, 4);

            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if (op1 == 0) {
                tcg_gen_andi_i32(tmp2, tmp2, 0xff);
            } else if (op1 == 1) {
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
            }
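            /*
             * op1 encodes the operand width (0 = byte, 1 = halfword,
             * 2 = word), so the masking above and the 1 << op1 byte count
             * passed to the helper below line up; bit 1 of the c field
             * selects the CRC32C (Castagnoli) polynomial instead of the
             * IEEE CRC32 one.
             */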
            tmp3 = tcg_const_i32(1 << op1);
            if (c & 0x2) {
                gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
            } else {
                gen_helper_crc32(tmp, tmp, tmp2, tmp3);
            }
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp3);
            store_reg(s, rd, tmp);
            break;
        }
        case 0x5: /* saturating add/subtract */
            ARCH(5TE);
            rd = (insn >> 12) & 0xf;
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rn);
            if (op1 & 2)
                gen_helper_double_saturate(tmp2, cpu_env, tmp2);
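            /*
             * For QDADD/QDSUB (op1 bit 1 set) the Rn operand is first
             * doubled with signed saturation, so QDADD computes
             * sat(Rm + sat(2 * Rn)); the Q flag is set if either step
             * saturates.
             */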
            if (op1 & 1)
                gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
            else
                gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 0x6: /* ERET */
            if (op1 != 3) {
                goto illegal_op;
            }
            if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
                goto illegal_op;
            }
            if ((insn & 0x000fff0f) != 0x0000000e) {
                /* UNPREDICTABLE; we choose to UNDEF */
                goto illegal_op;
            }

            if (s->current_el == 2) {
                tmp = load_cpu_field(elr_el[2]);
            } else {
                tmp = load_reg(s, 14);
            }
            gen_exception_return(s, tmp);
            break;
        case 7:
        {
            int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
            switch (op1) {
            case 0:
                /* HLT */
                gen_hlt(s, imm16);
                break;
            case 1:
                /* bkpt */
                ARCH(5);
                gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
                break;
            case 2:
                /* Hypervisor call (v7) */
                ARCH(7);
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_hvc(s, imm16);
                break;
            case 3:
                /* Secure monitor call (v6+) */
                ARCH(6K);
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_smc(s);
                break;
            default:
                g_assert_not_reached();
            }
            break;
        }
        case 0x8: /* signed multiply */
        case 0xa:
        case 0xc:
        case 0xe:
            ARCH(5TE);
            rs = (insn >> 8) & 0xf;
            rn = (insn >> 12) & 0xf;
            rd = (insn >> 16) & 0xf;
            if (op1 == 1) {
                /* (32 * 16) >> 16 */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                if (sh & 4)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
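                /*
                 * SMULW<y>: the significant bits of the 32x16 product sit
                 * in bits [47:0] of the 64-bit result, so shifting right
                 * by 16 keeps bits [47:16], i.e. the top 32 bits of the
                 * product, which is what gets written back (or
                 * accumulated for SMLAW<y> below).
                 */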
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if ((sh & 2) == 0) {
                    tmp2 = load_reg(s, rn);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                store_reg(s, rd, tmp);
            } else {
                /* 16 * 16 */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
                tcg_temp_free_i32(tmp2);
                if (op1 == 2) {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ext_i32_i64(tmp64, tmp);
                    tcg_temp_free_i32(tmp);
                    gen_addq(s, tmp64, rn, rd);
                    gen_storeq_reg(s, rn, rd, tmp64);
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (op1 == 0) {
                        tmp2 = load_reg(s, rn);
                        gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                    }
                    store_reg(s, rd, tmp);
                }
            }
            break;
        default:
            goto illegal_op;
        }
    } else if (((insn & 0x0e000000) == 0 &&
                (insn & 0x00000090) != 0x90) ||
               ((insn & 0x0e000000) == (1 << 25))) {
        int set_cc, logic_cc, shiftop;

        op1 = (insn >> 21) & 0xf;
        set_cc = (insn >> 20) & 1;
        logic_cc = table_logic_cc[op1] & set_cc;

        /* data processing instruction */
        if (insn & (1 << 25)) {
            /* immediate operand */
            val = insn & 0xff;
            shift = ((insn >> 8) & 0xf) * 2;
            if (shift) {
                val = (val >> shift) | (val << (32 - shift));
            }
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, val);
            if (logic_cc && shift) {
                gen_set_CF_bit31(tmp2);
            }
        } else {
            /* register */
            rm = (insn) & 0xf;
            tmp2 = load_reg(s, rm);
            shiftop = (insn >> 5) & 3;
            if (!(insn & (1 << 4))) {
                shift = (insn >> 7) & 0x1f;
                gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            } else {
                rs = (insn >> 8) & 0xf;
                tmp = load_reg(s, rs);
                gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
            }
        }
        if (op1 != 0x0f && op1 != 0x0d) {
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rn);
        } else {
            tmp = NULL;
        }
        rd = (insn >> 12) & 0xf;
        switch (op1) {
        case 0x00:
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x01:
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x02:
            if (set_cc && rd == 15) {
                /* SUBS r15, ... is used for exception return.  */
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_sub_CC(tmp, tmp, tmp2);
                gen_exception_return(s, tmp);
            } else {
                if (set_cc) {
                    gen_sub_CC(tmp, tmp, tmp2);
                } else {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                }
                store_reg_bx(s, rd, tmp);
            }
            break;
        case 0x03:
            if (set_cc) {
                gen_sub_CC(tmp, tmp2, tmp);
            } else {
                tcg_gen_sub_i32(tmp, tmp2, tmp);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x04:
            if (set_cc) {
                gen_add_CC(tmp, tmp, tmp2);
            } else {
                tcg_gen_add_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x05:
            if (set_cc) {
                gen_adc_CC(tmp, tmp, tmp2);
            } else {
                gen_add_carry(tmp, tmp, tmp2);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x06:
            if (set_cc) {
                gen_sbc_CC(tmp, tmp, tmp2);
            } else {
                gen_sub_carry(tmp, tmp, tmp2);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x07:
            if (set_cc) {
                gen_sbc_CC(tmp, tmp2, tmp);
            } else {
                gen_sub_carry(tmp, tmp2, tmp);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x08:
            if (set_cc) {
                tcg_gen_and_i32(tmp, tmp, tmp2);
                gen_logic_CC(tmp);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x09:
            if (set_cc) {
                tcg_gen_xor_i32(tmp, tmp, tmp2);
                gen_logic_CC(tmp);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x0a:
            if (set_cc) {
                gen_sub_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x0b:
            if (set_cc) {
                gen_add_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x0c:
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x0d:
            if (logic_cc && rd == 15) {
                /* MOVS r15, ... is used for exception return.  */
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_exception_return(s, tmp2);
            } else {
                if (logic_cc) {
                    gen_logic_CC(tmp2);
                }
                store_reg_bx(s, rd, tmp2);
            }
            break;
        case 0x0e:
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(s, rd, tmp);
            break;
        default:
        case 0x0f:
            tcg_gen_not_i32(tmp2, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(s, rd, tmp2);
            break;
        }
        if (op1 != 0x0f && op1 != 0x0d) {
            tcg_temp_free_i32(tmp2);
        }
    } else {
        /* other instructions */
        op1 = (insn >> 24) & 0xf;
        switch (op1) {
        case 0x0:
        case 0x1:
            /* multiplies, extra load/stores */
            sh = (insn >> 5) & 3;
            if (sh == 0) {
                if (op1 == 0x0) {
                    rd = (insn >> 16) & 0xf;
                    rn = (insn >> 12) & 0xf;
                    rs = (insn >> 8) & 0xf;
                    rm = (insn) & 0xf;
                    op1 = (insn >> 20) & 0xf;
                    switch (op1) {
                    case 0: case 1: case 2: case 3: case 6:
                        /* 32 bit mul */
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        tcg_gen_mul_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        if (insn & (1 << 22)) {
                            /* Subtract (mls) */
                            ARCH(6T2);
                            tmp2 = load_reg(s, rn);
                            tcg_gen_sub_i32(tmp, tmp2, tmp);
                            tcg_temp_free_i32(tmp2);
                        } else if (insn & (1 << 21)) {
                            /* Add */
                            tmp2 = load_reg(s, rn);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        if (insn & (1 << 20))
                            gen_logic_CC(tmp);
                        store_reg(s, rd, tmp);
                        break;
                    case 4:
                        /* 64 bit mul double accumulate (UMAAL) */
                        ARCH(6);
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                        gen_addq_lo(s, tmp64, rn);
                        gen_addq_lo(s, tmp64, rd);
                        gen_storeq_reg(s, rn, rd, tmp64);
                        tcg_temp_free_i64(tmp64);
                        break;
                    case 8: case 9: case 10: case 11:
                    case 12: case 13: case 14: case 15:
                        /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
                        } else {
                            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
                        }
                        if (insn & (1 << 21)) { /* mult accumulate */
                            TCGv_i32 al = load_reg(s, rn);
                            TCGv_i32 ah = load_reg(s, rd);
                            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
                            tcg_temp_free_i32(al);
                            tcg_temp_free_i32(ah);
                        }
                        if (insn & (1 << 20)) {
                            gen_logicq_cc(tmp, tmp2);
                        }
                        store_reg(s, rn, tmp);
                        store_reg(s, rd, tmp2);
                        break;
                    default:
                        goto illegal_op;
                    }
                } else {
                    rn = (insn >> 16) & 0xf;
                    rd = (insn >> 12) & 0xf;
                    if (insn & (1 << 23)) {
                        /* load/store exclusive */
                        int op2 = (insn >> 8) & 3;
                        op1 = (insn >> 21) & 0x3;

                        switch (op2) {
                        case 0: /* lda/stl */
                            if (op1 == 1) {
                                goto illegal_op;
                            }
                            ARCH(8);
                            break;
                        case 1: /* reserved */
                            goto illegal_op;
                        case 2: /* ldaex/stlex */
                            ARCH(8);
                            break;
                        case 3: /* ldrex/strex */
                            if (op1) {
                                ARCH(6K);
                            } else {
                                ARCH(6);
                            }
                            break;
                        }

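                        /*
                         * For the gen_{load,store}_exclusive() calls below,
                         * the final argument is log2 of the access size and
                         * a second register of 15 means "none": only the
                         * doubleword forms pass a real second register.
                         */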
                        addr = tcg_temp_local_new_i32();
                        load_reg_var(s, addr, rn);

                        /* Since the emulation does not have barriers,
                           the acquire/release semantics need no special
                           handling */
                        if (op2 == 0) {
                            if (insn & (1 << 20)) {
                                tmp = tcg_temp_new_i32();
                                switch (op1) {
                                case 0: /* lda */
                                    gen_aa32_ld32u_iss(s, tmp, addr,
                                                       get_mem_index(s),
                                                       rd | ISSIsAcqRel);
                                    break;
                                case 2: /* ldab */
                                    gen_aa32_ld8u_iss(s, tmp, addr,
                                                      get_mem_index(s),
                                                      rd | ISSIsAcqRel);
                                    break;
                                case 3: /* ldah */
                                    gen_aa32_ld16u_iss(s, tmp, addr,
                                                       get_mem_index(s),
                                                       rd | ISSIsAcqRel);
                                    break;
                                default:
                                    abort();
                                }
                                store_reg(s, rd, tmp);
                            } else {
                                rm = insn & 0xf;
                                tmp = load_reg(s, rm);
                                switch (op1) {
                                case 0: /* stl */
                                    gen_aa32_st32_iss(s, tmp, addr,
                                                      get_mem_index(s),
                                                      rm | ISSIsAcqRel);
                                    break;
                                case 2: /* stlb */
                                    gen_aa32_st8_iss(s, tmp, addr,
                                                     get_mem_index(s),
                                                     rm | ISSIsAcqRel);
                                    break;
                                case 3: /* stlh */
                                    gen_aa32_st16_iss(s, tmp, addr,
                                                      get_mem_index(s),
                                                      rm | ISSIsAcqRel);
                                    break;
                                default:
                                    abort();
                                }
                                tcg_temp_free_i32(tmp);
                            }
                        } else if (insn & (1 << 20)) {
                            switch (op1) {
                            case 0: /* ldrex */
                                gen_load_exclusive(s, rd, 15, addr, 2);
                                break;
                            case 1: /* ldrexd */
                                gen_load_exclusive(s, rd, rd + 1, addr, 3);
                                break;
                            case 2: /* ldrexb */
                                gen_load_exclusive(s, rd, 15, addr, 0);
                                break;
                            case 3: /* ldrexh */
                                gen_load_exclusive(s, rd, 15, addr, 1);
                                break;
                            default:
                                abort();
                            }
                        } else {
                            rm = insn & 0xf;
                            switch (op1) {
                            case 0: /* strex */
                                gen_store_exclusive(s, rd, rm, 15, addr, 2);
                                break;
                            case 1: /* strexd */
                                gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
                                break;
                            case 2: /* strexb */
                                gen_store_exclusive(s, rd, rm, 15, addr, 0);
                                break;
                            case 3: /* strexh */
                                gen_store_exclusive(s, rd, rm, 15, addr, 1);
                                break;
                            default:
                                abort();
                            }
                        }
                        tcg_temp_free_i32(addr);
                    } else if ((insn & 0x00300f00) == 0) {
                        /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
                         * - SWP, SWPB
                         */

                        TCGv taddr;
                        TCGMemOp opc = s->be_data;

                        rm = (insn) & 0xf;

                        if (insn & (1 << 22)) {
                            opc |= MO_UB;
                        } else {
                            opc |= MO_UL | MO_ALIGN;
                        }

                        addr = load_reg(s, rn);
                        taddr = gen_aa32_addr(s, addr, opc);
                        tcg_temp_free_i32(addr);

                        tmp = load_reg(s, rm);
                        tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
                                                get_mem_index(s), opc);
                        tcg_temp_free(taddr);
                        store_reg(s, rd, tmp);
                    } else {
                        goto illegal_op;
                    }
                }
            } else {
                int address_offset;
                bool load = insn & (1 << 20);
                bool wbit = insn & (1 << 21);
                bool pbit = insn & (1 << 24);
                bool doubleword = false;
                ISSInfo issinfo;

                /* Misc load/store */
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;

                /* ISS not valid if writeback */
                issinfo = (pbit & !wbit) ? rd : ISSInvalid;

                if (!load && (sh & 2)) {
                    /* doubleword */
                    ARCH(5TE);
                    if (rd & 1) {
                        /* UNPREDICTABLE; we choose to UNDEF */
                        goto illegal_op;
                    }
                    load = (sh & 1) == 0;
                    doubleword = true;
                }

                addr = load_reg(s, rn);
                if (pbit) {
                    gen_add_datah_offset(s, insn, 0, addr);
                }
                address_offset = 0;

                if (doubleword) {
                    if (!load) {
                        /* store */
                        tmp = load_reg(s, rd);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = load_reg(s, rd + 1);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* load */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, rd, tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        rd++;
                    }
                    address_offset = -4;
                } else if (load) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    switch (sh) {
                    case 1:
                        gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
                                           issinfo);
                        break;
                    case 2:
                        gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
                                          issinfo);
                        break;
                    default:
                    case 3:
                        gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
                                           issinfo);
                        break;
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, rd);
                    gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
                    tcg_temp_free_i32(tmp);
                }
                /* Perform base writeback before the loaded value to
                   ensure correct behavior with overlapping index registers.
                   ldrd with base writeback is undefined if the
                   destination and index registers overlap. */
                if (!pbit) {
                    gen_add_datah_offset(s, insn, address_offset, addr);
                    store_reg(s, rn, addr);
                } else if (wbit) {
                    if (address_offset)
                        tcg_gen_addi_i32(addr, addr, address_offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                if (load) {
                    /* Complete the load.  */
                    store_reg(s, rd, tmp);
                }
            }
            break;
        case 0x4:
        case 0x5:
            goto do_ldst;
        case 0x6:
        case 0x7:
            if (insn & (1 << 4)) {
                ARCH(6);
                /* Armv6 Media instructions.  */
                rm = insn & 0xf;
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                rs = (insn >> 8) & 0xf;
                switch ((insn >> 23) & 3) {
                case 0: /* Parallel add/subtract.  */
                    op1 = (insn >> 20) & 7;
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    sh = (insn >> 5) & 7;
                    if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                        goto illegal_op;
                    gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                    break;
                case 1:
                    if ((insn & 0x00700020) == 0) {
                        /* Halfword pack.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            /* pkhtb */
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp2, tmp2, shift);
                            tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                            tcg_gen_ext16u_i32(tmp2, tmp2);
                        } else {
                            /* pkhbt */
                            if (shift)
                                tcg_gen_shli_i32(tmp2, tmp2, shift);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        }
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00200020) == 0x00200000) {
                        /* [us]sat */
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp, tmp, shift);
                        } else {
                            tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                        else
                            gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00300fe0) == 0x00200f20) {
                        /* [us]sat16 */
                        tmp = load_reg(s, rm);
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                        else
                            gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                        /* Select bytes.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        tmp3 = tcg_temp_new_i32();
                        tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                        gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                        tcg_temp_free_i32(tmp3);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x000003e0) == 0x00000060) {
                        tmp = load_reg(s, rm);
                        shift = (insn >> 10) & 3;
                        /* ??? In many cases it's not necessary to do a
                           rotate, a shift is sufficient.  */
                        if (shift != 0)
                            tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                        op1 = (insn >> 20) & 7;
                        switch (op1) {
                        case 0: gen_sxtb16(tmp); break;
                        case 2: gen_sxtb(tmp);   break;
                        case 3: gen_sxth(tmp);   break;
                        case 4: gen_uxtb16(tmp); break;
                        case 6: gen_uxtb(tmp);   break;
                        case 7: gen_uxth(tmp);   break;
                        default: goto illegal_op;
                        }
                        if (rn != 15) {
                            tmp2 = load_reg(s, rn);
                            if ((op1 & 3) == 0) {
                                gen_add16(tmp, tmp2);
                            } else {
                                tcg_gen_add_i32(tmp, tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            }
                        }
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                        /* rev */
                        tmp = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            if (insn & (1 << 7)) {
                                gen_revsh(tmp);
                            } else {
                                ARCH(6T2);
                                gen_helper_rbit(tmp, tmp);
                            }
                        } else {
                            if (insn & (1 << 7))
                                gen_rev16(tmp);
                            else
                                tcg_gen_bswap32_i32(tmp, tmp);
                        }
                        store_reg(s, rd, tmp);
                    } else {
                        goto illegal_op;
                    }
                    break;
                case 2: /* Multiplies (Type 3).  */
                    switch ((insn >> 20) & 0x7) {
                    case 5:
                        if (((insn >> 6) ^ (insn >> 7)) & 1) {
                            /* op2 not 00x or 11x : UNDEF */
                            goto illegal_op;
                        }
                        /* Signed multiply most significant [accumulate].
                           (SMMUL, SMMLA, SMMLS) */
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);

                        if (rd != 15) {
                            tmp = load_reg(s, rd);
                            if (insn & (1 << 6)) {
                                tmp64 = gen_subq_msw(tmp64, tmp);
                            } else {
                                tmp64 = gen_addq_msw(tmp64, tmp);
                            }
                        }
                        if (insn & (1 << 5)) {
                            tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                        }
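                        /*
                         * The R bit (insn bit 5) selects rounding: adding
                         * 0x80000000 before the 32-bit right shift below
                         * rounds the result to nearest rather than
                         * truncating towards minus infinity.
                         */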
                        tcg_gen_shri_i64(tmp64, tmp64, 32);
                        tmp = tcg_temp_new_i32();
                        tcg_gen_extrl_i64_i32(tmp, tmp64);
                        tcg_temp_free_i64(tmp64);
                        store_reg(s, rn, tmp);
                        break;
                    case 0:
                    case 4:
                        /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
                        if (insn & (1 << 7)) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        if (insn & (1 << 5))
                            gen_swap_half(tmp2);
                        gen_smul_dual(tmp, tmp2);
                        if (insn & (1 << 22)) {
                            /* smlald, smlsld */
                            TCGv_i64 tmp64_2;

                            tmp64 = tcg_temp_new_i64();
                            tmp64_2 = tcg_temp_new_i64();
                            tcg_gen_ext_i32_i64(tmp64, tmp);
                            tcg_gen_ext_i32_i64(tmp64_2, tmp2);
                            tcg_temp_free_i32(tmp);
                            tcg_temp_free_i32(tmp2);
                            if (insn & (1 << 6)) {
                                tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
                            } else {
                                tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
                            }
                            tcg_temp_free_i64(tmp64_2);
                            gen_addq(s, tmp64, rd, rn);
                            gen_storeq_reg(s, rd, rn, tmp64);
                            tcg_temp_free_i64(tmp64);
                        } else {
                            /* smuad, smusd, smlad, smlsd */
                            if (insn & (1 << 6)) {
                                /* This subtraction cannot overflow. */
                                tcg_gen_sub_i32(tmp, tmp, tmp2);
                            } else {
                                /* This addition cannot overflow 32 bits;
                                 * however it may overflow considered as a
                                 * signed operation, in which case we must set
                                 * the Q flag.
                                 */
                                gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                            }
                            tcg_temp_free_i32(tmp2);
                            if (rd != 15) {
                                tmp2 = load_reg(s, rd);
                                gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            }
                            store_reg(s, rn, tmp);
                        }
                        break;
                    case 1:
                    case 3:
                        /* SDIV, UDIV */
                        if (!dc_isar_feature(arm_div, s)) {
                            goto illegal_op;
                        }
                        if (((insn >> 5) & 7) || (rd != 15)) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        if (insn & (1 << 21)) {
                            gen_helper_udiv(tmp, tmp, tmp2);
                        } else {
                            gen_helper_sdiv(tmp, tmp, tmp2);
                        }
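                        /*
                         * The udiv/sdiv helpers follow the ARM semantics:
                         * division by zero yields 0 rather than trapping,
                         * and INT_MIN / -1 returns INT_MIN for sdiv.
                         */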
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rn, tmp);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                case 3:
                    op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
                    switch (op1) {
                    case 0: /* Unsigned sum of absolute differences.  */
                        ARCH(6);
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        gen_helper_usad8(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        if (rd != 15) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        store_reg(s, rn, tmp);
                        break;
                    case 0x20: case 0x24: case 0x28: case 0x2c:
                        /* Bitfield insert/clear.  */
                        ARCH(6T2);
                        shift = (insn >> 7) & 0x1f;
                        i = (insn >> 16) & 0x1f;
                        if (i < shift) {
                            /* UNPREDICTABLE; we choose to UNDEF */
                            goto illegal_op;
                        }
                        i = i + 1 - shift;
                        if (rm == 15) {
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, 0);
                        } else {
                            tmp = load_reg(s, rm);
                        }
                        if (i != 32) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
                            tcg_temp_free_i32(tmp2);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                    case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                        ARCH(6T2);
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        i = ((insn >> 16) & 0x1f) + 1;
                        if (shift + i > 32)
                            goto illegal_op;
                        if (i < 32) {
                            if (op1 & 0x20) {
                                tcg_gen_extract_i32(tmp, tmp, shift, i);
                            } else {
                                tcg_gen_sextract_i32(tmp, tmp, shift, i);
                            }
                        }
                        store_reg(s, rd, tmp);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                }
                break;
            }
        do_ldst:
            /* Check for undefined extension instructions
             * per the ARM Architecture Reference Manual, i.e.:
             * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
             */
            sh = (0xf << 20) | (0xf << 4);
            if (op1 == 0x7 && ((insn & sh) == sh)) {
                goto illegal_op;
            }
            /* load/store byte/word */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            tmp2 = load_reg(s, rn);
            if ((insn & 0x01200000) == 0x00200000) {
                /* ldrt/strt */
                i = get_a32_user_mem_index(s);
            } else {
                i = get_mem_index(s);
            }
            if (insn & (1 << 24))
                gen_add_data_offset(s, insn, tmp2);
            if (insn & (1 << 20)) {
                /* load */
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 22)) {
                    gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
                } else {
                    gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
                }
            } else {
                /* store */
                tmp = load_reg(s, rd);
                if (insn & (1 << 22)) {
                    gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
                } else {
                    gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
                }
                tcg_temp_free_i32(tmp);
            }
            if (!(insn & (1 << 24))) {
                gen_add_data_offset(s, insn, tmp2);
                store_reg(s, rn, tmp2);
            } else if (insn & (1 << 21)) {
                store_reg(s, rn, tmp2);
            } else {
                tcg_temp_free_i32(tmp2);
            }
            if (insn & (1 << 20)) {
                /* Complete the load.  */
                store_reg_from_load(s, rd, tmp);
            }
            break;
        case 0x08:
        case 0x09:
            {
                int j, n, loaded_base;
                bool exc_return = false;
                bool is_load = extract32(insn, 20, 1);
                bool user = false;
                TCGv_i32 loaded_var;
                /* load/store multiple words */
                /* XXX: store correct base if write back */
                if (insn & (1 << 22)) {
                    /* LDM (user), LDM (exception return) and STM (user) */
                    if (IS_USER(s))
                        goto illegal_op; /* only usable in supervisor mode */

                    if (is_load && extract32(insn, 15, 1)) {
                        exc_return = true;
                    } else {
                        user = true;
                    }
                }
                rn = (insn >> 16) & 0xf;
                addr = load_reg(s, rn);

                /* compute total size */
                loaded_base = 0;
                loaded_var = NULL;
                n = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        n++;
                }
                /* XXX: test invalid n == 0 case ? */
                if (insn & (1 << 23)) {
                    if (insn & (1 << 24)) {
                        /* pre increment */
                        tcg_gen_addi_i32(addr, addr, 4);
                    } else {
                        /* post increment */
                    }
                } else {
                    if (insn & (1 << 24)) {
                        /* pre decrement */
                        tcg_gen_addi_i32(addr, addr, -(n * 4));
                    } else {
                        /* post decrement */
                        if (n != 1)
                            tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                    }
                }
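                /*
                 * The adjustments above normalise every addressing mode
                 * to "transfer n registers at ascending addresses
                 * starting at addr": e.g. for LDMDB the base is dropped
                 * by 4 * n up front, so r4 in an LDMDB {r4, r5} ends up
                 * at the lowest address, as the architecture requires.
                 */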
                j = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i)) {
                        if (is_load) {
                            /* load */
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            if (user) {
                                tmp2 = tcg_const_i32(i);
                                gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                                tcg_temp_free_i32(tmp2);
                                tcg_temp_free_i32(tmp);
                            } else if (i == rn) {
                                loaded_var = tmp;
                                loaded_base = 1;
                            } else if (rn == 15 && exc_return) {
                                store_pc_exc_ret(s, tmp);
                            } else {
                                store_reg_from_load(s, i, tmp);
                            }
                        } else {
                            /* store */
                            if (i == 15) {
                                /* special case: r15 = PC + 8 */
                                val = (long)s->pc + 4;
                                tmp = tcg_temp_new_i32();
                                tcg_gen_movi_i32(tmp, val);
                            } else if (user) {
                                tmp = tcg_temp_new_i32();
                                tmp2 = tcg_const_i32(i);
                                gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                                tcg_temp_free_i32(tmp2);
                            } else {
                                tmp = load_reg(s, i);
                            }
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        j++;
                        /* no need to add after the last transfer */
                        if (j != n)
                            tcg_gen_addi_i32(addr, addr, 4);
                    }
                }
                if (insn & (1 << 21)) {
                    /* write back */
                    if (insn & (1 << 23)) {
                        if (insn & (1 << 24)) {
                            /* pre increment */
                        } else {
                            /* post increment */
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                    } else {
                        if (insn & (1 << 24)) {
                            /* pre decrement */
                            if (n != 1)
                                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                        } else {
                            /* post decrement */
                            tcg_gen_addi_i32(addr, addr, -(n * 4));
                        }
                    }
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if (exc_return) {
                    /* Restore CPSR from SPSR.  */
                    tmp = load_cpu_field(spsr);
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_start();
                    }
                    gen_helper_cpsr_write_eret(cpu_env, tmp);
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_end();
                    }
                    tcg_temp_free_i32(tmp);
                    /* Must exit loop to check un-masked IRQs */
                    s->base.is_jmp = DISAS_EXIT;
                }
            }
            break;
        case 0xa:
        case 0xb:
            {
                int32_t offset;

                /* branch (and link) */
                val = (int32_t)s->pc;
                if (insn & (1 << 24)) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, val);
                    store_reg(s, 14, tmp);
                }
                offset = sextract32(insn << 2, 0, 26);
                val += offset + 4;
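                /*
                 * sextract32(insn << 2, 0, 26) scales the 24-bit
                 * immediate by 4 and sign-extends it in one step; the
                 * extra + 4 on top of s->pc (already insn address + 4)
                 * gives the architectural PC-relative base of insn
                 * address + 8.
                 */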
                gen_jmp(s, val);
            }
            break;
        case 0xc:
        case 0xd:
        case 0xe:
            if (((insn >> 8) & 0xe) == 10) {
                /* VFP.  */
                if (disas_vfp_insn(s, insn)) {
                    goto illegal_op;
                }
            } else if (disas_coproc_insn(s, insn)) {
                /* Coprocessor.  */
                goto illegal_op;
            }
            break;
        case 0xf:
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 24);
            s->base.is_jmp = DISAS_SWI;
            break;
        default:
        illegal_op:
            gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                               default_exception_el(s));
            break;
        }
    }
}

static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
{
    /* Return true if this is a 16 bit instruction. We must be precise
     * about this (matching the decode).  We assume that s->pc still
     * points to the first 16 bits of the insn.
     */
    if ((insn >> 11) < 0x1d) {
        /* Definitely a 16-bit instruction */
        return true;
    }
10171 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10172 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10173 * end up actually treating this as two 16-bit insns, though,
10174 * if it's half of a bl/blx pair that might span a page boundary.
10175 */
14120108
JS
10176 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10177 arm_dc_feature(s, ARM_FEATURE_M)) {
296e5a0a
PM
10178 /* Thumb2 cores (including all M profile ones) always treat
10179 * 32-bit insns as 32-bit.
10180 */
10181 return false;
10182 }
10183
bfe7ad5b 10184 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
296e5a0a
PM
10185 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10186 * is not on the next page; we merge this into a 32-bit
10187 * insn.
10188 */
10189 return false;
10190 }
10191 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10192 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10193 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10194 * -- handle as single 16 bit insn
10195 */
10196 return true;
10197}
10198
9ee6e8bb
PB
10199/* Return true if this is a Thumb-2 logical op. */
10200static int
10201thumb2_logic_op(int op)
10202{
10203 return (op < 8);
10204}
10205
10206/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
10207 then set condition code flags based on the result of the operation.
10208 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
10209 to the high bit of T1.
10210 Returns zero if the opcode is valid. */
10211
10212static int
39d5492a
PM
10213gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
10214 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
10215{
10216 int logic_cc;
10217
10218 logic_cc = 0;
10219 switch (op) {
10220 case 0: /* and */
396e467c 10221 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
10222 logic_cc = conds;
10223 break;
10224 case 1: /* bic */
f669df27 10225 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
10226 logic_cc = conds;
10227 break;
10228 case 2: /* orr */
396e467c 10229 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
10230 logic_cc = conds;
10231 break;
10232 case 3: /* orn */
29501f1b 10233 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
10234 logic_cc = conds;
10235 break;
10236 case 4: /* eor */
396e467c 10237 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
10238 logic_cc = conds;
10239 break;
10240 case 8: /* add */
10241 if (conds)
72485ec4 10242 gen_add_CC(t0, t0, t1);
9ee6e8bb 10243 else
396e467c 10244 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
10245 break;
10246 case 10: /* adc */
10247 if (conds)
49b4c31e 10248 gen_adc_CC(t0, t0, t1);
9ee6e8bb 10249 else
396e467c 10250 gen_adc(t0, t1);
9ee6e8bb
PB
10251 break;
10252 case 11: /* sbc */
2de68a49
RH
10253 if (conds) {
10254 gen_sbc_CC(t0, t0, t1);
10255 } else {
396e467c 10256 gen_sub_carry(t0, t0, t1);
2de68a49 10257 }
9ee6e8bb
PB
10258 break;
10259 case 13: /* sub */
10260 if (conds)
72485ec4 10261 gen_sub_CC(t0, t0, t1);
9ee6e8bb 10262 else
396e467c 10263 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
10264 break;
10265 case 14: /* rsb */
10266 if (conds)
72485ec4 10267 gen_sub_CC(t0, t1, t0);
9ee6e8bb 10268 else
396e467c 10269 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
10270 break;
10271 default: /* 5, 6, 7, 9, 12, 15. */
10272 return 1;
10273 }
10274 if (logic_cc) {
396e467c 10275 gen_logic_CC(t0);
9ee6e8bb 10276 if (shifter_out)
396e467c 10277 gen_set_CF_bit31(t1);
9ee6e8bb
PB
10278 }
10279 return 0;
10280}
10281
2eea841c
PM
10282/* Translate a 32-bit thumb instruction. */
10283static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 10284{
296e5a0a 10285 uint32_t imm, shift, offset;
9ee6e8bb 10286 uint32_t rd, rn, rm, rs;
39d5492a
PM
10287 TCGv_i32 tmp;
10288 TCGv_i32 tmp2;
10289 TCGv_i32 tmp3;
10290 TCGv_i32 addr;
a7812ae4 10291 TCGv_i64 tmp64;
9ee6e8bb
PB
10292 int op;
10293 int shiftop;
10294 int conds;
10295 int logic_cc;
10296
14120108
JS
10297 /*
10298 * ARMv6-M supports a limited subset of Thumb2 instructions.
10299 * Other Thumb1 architectures allow only 32-bit
10300 * combined BL/BLX prefix and suffix.
296e5a0a 10301 */
14120108
JS
10302 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10303 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10304 int i;
10305 bool found = false;
8297cb13
JS
10306 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10307 0xf3b08040 /* dsb */,
10308 0xf3b08050 /* dmb */,
10309 0xf3b08060 /* isb */,
10310 0xf3e08000 /* mrs */,
10311 0xf000d000 /* bl */};
10312 static const uint32_t armv6m_mask[] = {0xffe0d000,
10313 0xfff0d0f0,
10314 0xfff0d0f0,
10315 0xfff0d0f0,
10316 0xffe0d000,
10317 0xf800d000};
14120108
JS
10318
10319 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10320 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10321 found = true;
10322 break;
10323 }
10324 }
10325 if (!found) {
10326 goto illegal_op;
10327 }
10328 } else if ((insn & 0xf800e800) != 0xf000e800) {
9ee6e8bb
PB
10329 ARCH(6T2);
10330 }
10331
10332 rn = (insn >> 16) & 0xf;
10333 rs = (insn >> 12) & 0xf;
10334 rd = (insn >> 8) & 0xf;
10335 rm = insn & 0xf;
10336 switch ((insn >> 25) & 0xf) {
10337 case 0: case 1: case 2: case 3:
10338 /* 16-bit instructions. Should never happen. */
10339 abort();
10340 case 4:
10341 if (insn & (1 << 22)) {
ebfe27c5
PM
10342 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10343 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 10344 * table branch, TT.
ebfe27c5 10345 */
76eff04d
PM
10346 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10347 arm_dc_feature(s, ARM_FEATURE_V8)) {
10348 /* 0b1110_1001_0111_1111_1110_1001_0111_111
10349 * - SG (v8M only)
10350 * The bulk of the behaviour for this instruction is implemented
10351 * in v7m_handle_execute_nsc(), which deals with the insn when
10352 * it is executed by a CPU in non-secure state from memory
10353 * which is Secure & NonSecure-Callable.
10354 * Here we only need to handle the remaining cases:
10355 * * in NS memory (including the "security extension not
10356 * implemented" case) : NOP
10357 * * in S memory but CPU already secure (clear IT bits)
10358 * We know that the attribute for the memory this insn is
10359 * in must match the current CPU state, because otherwise
10360 * get_phys_addr_pmsav8 would have generated an exception.
10361 */
10362 if (s->v8m_secure) {
10363 /* Like the IT insn, we don't need to generate any code */
10364 s->condexec_cond = 0;
10365 s->condexec_mask = 0;
10366 }
            } else if (insn & 0x01200000) {
                /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
                 *  - load/store dual (post-indexed)
                 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
                 *  - load/store dual (literal and immediate)
                 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
                 *  - load/store dual (pre-indexed)
                 */
                bool wback = extract32(insn, 21, 1);

                if (rn == 15) {
                    if (insn & (1 << 21)) {
                        /* UNPREDICTABLE */
                        goto illegal_op;
                    }
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~3);
                } else {
                    addr = load_reg(s, rn);
                }
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0) {
                    offset = -offset;
                }

                if (s->v8m_stackcheck && rn == 13 && wback) {
                    /*
                     * Here 'addr' is the current SP; if offset is +ve we're
                     * moving SP up, else down. It is UNKNOWN whether the limit
                     * check triggers when SP starts below the limit and ends
                     * up above it; check whichever of the current and final
                     * SP is lower, so QEMU will trigger in that situation.
                     */
                    if ((int32_t)offset < 0) {
                        TCGv_i32 newsp = tcg_temp_new_i32();

                        tcg_gen_addi_i32(newsp, addr, offset);
                        gen_helper_v8m_stackcheck(cpu_env, newsp);
                        tcg_temp_free_i32(newsp);
                    } else {
                        gen_helper_v8m_stackcheck(cpu_env, addr);
                    }
                }

                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, offset);
                    offset = 0;
                }
                if (insn & (1 << 20)) {
                    /* ldrd */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    store_reg(s, rd, tmp);
                } else {
                    /* strd */
                    tmp = load_reg(s, rs);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                if (wback) {
                    /* Base writeback.  */
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            } else if ((insn & (1 << 23)) == 0) {
                /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
                 * - load/store exclusive word
                 * - TT (v8M only)
                 */
                if (rs == 15) {
                    if (!(insn & (1 << 20)) &&
                        arm_dc_feature(s, ARM_FEATURE_M) &&
                        arm_dc_feature(s, ARM_FEATURE_V8)) {
                        /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
                         *  - TT (v8M only)
                         */
                        bool alt = insn & (1 << 7);
                        TCGv_i32 addr, op, ttresp;

                        if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
                            /* we UNDEF for these UNPREDICTABLE cases */
                            goto illegal_op;
                        }

                        if (alt && !s->v8m_secure) {
                            goto illegal_op;
                        }

                        addr = load_reg(s, rn);
                        op = tcg_const_i32(extract32(insn, 6, 2));
                        ttresp = tcg_temp_new_i32();
                        gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
                        tcg_temp_free_i32(addr);
                        tcg_temp_free_i32(op);
                        store_reg(s, rd, ttresp);
                        break;
                    }
                    goto illegal_op;
                }
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                } else {
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                }
                tcg_temp_free_i32(addr);
            } else if ((insn & (7 << 5)) == 0) {
                /* Table Branch.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc);
                } else {
                    addr = load_reg(s, rn);
                }
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    /* tbh */
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                } else { /* tbb */
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                }
                tcg_temp_free_i32(addr);
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, s->pc);
                store_reg(s, 15, tmp);
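                /*
                 * Worked example: tbb [r0, r1] with r0 the table base and
                 * r1 = 3 loads byte table[3], doubles it and adds s->pc
                 * (the address after the tbb), so a byte value of 0x10
                 * branches 0x20 bytes forward; tbh uses addr + 2 * rm and
                 * halfword entries instead.
                 */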
            } else {
                int op2 = (insn >> 6) & 0x3;
                op = (insn >> 4) & 0x3;
                switch (op2) {
                case 0:
                    goto illegal_op;
                case 1:
                    /* Load/store exclusive byte/halfword/doubleword */
                    if (op == 2) {
                        goto illegal_op;
                    }
                    ARCH(7);
                    break;
                case 2:
                    /* Load-acquire/store-release */
                    if (op == 3) {
                        goto illegal_op;
                    }
                    /* Fall through */
                case 3:
                    /* Load-acquire/store-release exclusive */
                    ARCH(8);
                    break;
                }
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                if (!(op2 & 1)) {
                    if (insn & (1 << 20)) {
                        tmp = tcg_temp_new_i32();
                        switch (op) {
                        case 0: /* ldab */
                            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
                                              rs | ISSIsAcqRel);
                            break;
                        case 1: /* ldah */
                            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
                                               rs | ISSIsAcqRel);
                            break;
                        case 2: /* lda */
                            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
                                               rs | ISSIsAcqRel);
                            break;
                        default:
                            abort();
                        }
                        store_reg(s, rs, tmp);
                    } else {
                        tmp = load_reg(s, rs);
                        switch (op) {
                        case 0: /* stlb */
                            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
                                             rs | ISSIsAcqRel);
                            break;
                        case 1: /* stlh */
                            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
                                              rs | ISSIsAcqRel);
                            break;
                        case 2: /* stl */
                            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
                                              rs | ISSIsAcqRel);
                            break;
                        default:
                            abort();
                        }
                        tcg_temp_free_i32(tmp);
                    }
                } else if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                } else {
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                }
                tcg_temp_free_i32(addr);
            }
        } else {
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* RFE, SRS: not available in user mode or on M profile */
                if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
                    goto illegal_op;
                }
                if (insn & (1 << 20)) {
                    /* rfe */
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2.  */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, -4);
                        }
                        store_reg(s, rn, addr);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                    gen_rfe(s, tmp, tmp2);
                } else {
                    /* srs */
                    gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
                            insn & (1 << 21));
                }
            } else {
                int i, loaded_base = 0;
                TCGv_i32 loaded_var;
                bool wback = extract32(insn, 21, 1);
                /* Load/store multiple.  */
                addr = load_reg(s, rn);
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }

                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                }

                if (s->v8m_stackcheck && rn == 13 && wback) {
                    /*
                     * If the writeback is incrementing SP rather than
                     * decrementing it, and the initial SP is below the
                     * stack limit but the final written-back SP would
                     * be above, then we must not perform any memory
                     * accesses, but it is IMPDEF whether we generate
                     * an exception. We choose to do so in this case.
                     * At this point 'addr' is the lowest address, so
                     * either the original SP (if incrementing) or our
                     * final SP (if decrementing), so that's what we check.
                     */
                    gen_helper_v8m_stackcheck(cpu_env, addr);
                }
10648
f764718d 10649 loaded_var = NULL;
9ee6e8bb
PB
10650 for (i = 0; i < 16; i++) {
10651 if ((insn & (1 << i)) == 0)
10652 continue;
10653 if (insn & (1 << 20)) {
10654 /* Load. */
e2592fad 10655 tmp = tcg_temp_new_i32();
12dcc321 10656 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10657 if (i == 15) {
3bb8a96f 10658 gen_bx_excret(s, tmp);
5856d44e
YO
10659 } else if (i == rn) {
10660 loaded_var = tmp;
10661 loaded_base = 1;
9ee6e8bb 10662 } else {
b0109805 10663 store_reg(s, i, tmp);
9ee6e8bb
PB
10664 }
10665 } else {
10666 /* Store. */
b0109805 10667 tmp = load_reg(s, i);
12dcc321 10668 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10669 tcg_temp_free_i32(tmp);
9ee6e8bb 10670 }
b0109805 10671 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 10672 }
5856d44e
YO
10673 if (loaded_base) {
10674 store_reg(s, rn, loaded_var);
10675 }
7c0ed88e 10676 if (wback) {
9ee6e8bb
PB
10677 /* Base register writeback. */
10678 if (insn & (1 << 24)) {
b0109805 10679 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10680 }
10681 /* Fault if writeback register is in register list. */
10682 if (insn & (1 << rn))
10683 goto illegal_op;
b0109805
PB
10684 store_reg(s, rn, addr);
10685 } else {
7d1b0095 10686 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10687 }
10688 }
10689 }
10690 break;
2af9ab77
JB
10691 case 5:
10692
9ee6e8bb 10693 op = (insn >> 21) & 0xf;
2af9ab77 10694 if (op == 6) {
62b44f05
AR
10695 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10696 goto illegal_op;
10697 }
2af9ab77
JB
10698 /* Halfword pack. */
10699 tmp = load_reg(s, rn);
10700 tmp2 = load_reg(s, rm);
10701 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10702 if (insn & (1 << 5)) {
10703 /* pkhtb */
10704 if (shift == 0)
10705 shift = 31;
10706 tcg_gen_sari_i32(tmp2, tmp2, shift);
10707 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10708 tcg_gen_ext16u_i32(tmp2, tmp2);
10709 } else {
10710 /* pkhbt */
10711 if (shift)
10712 tcg_gen_shli_i32(tmp2, tmp2, shift);
10713 tcg_gen_ext16u_i32(tmp, tmp);
10714 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10715 }
10716 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10717 tcg_temp_free_i32(tmp2);
3174f8e9
FN
10718 store_reg(s, rd, tmp);
        } else {
            /* Data processing register constant shift.  */
            if (rn == 15) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            } else {
                tmp = load_reg(s, rn);
            }
            tmp2 = load_reg(s, rm);

            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
                goto illegal_op;
            tcg_temp_free_i32(tmp2);
            if (rd == 13 &&
                ((op == 2 && rn == 15) ||
                 (op == 8 && rn == 13) ||
                 (op == 13 && rn == 13))) {
                /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
                store_sp_checked(s, tmp);
            } else if (rd != 15) {
                store_reg(s, rd, tmp);
            } else {
                tcg_temp_free_i32(tmp);
            }
        }
        break;
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            /*
             * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
             *  - MOV, MOVS (register-shifted register), flagsetting
             */
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            if (logic_cc)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
            break;
        case 1: /* Sign/zero extend.  */
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: /* SXTAH, SXTH */
            case 1: /* UXTAH, UXTH */
            case 4: /* SXTAB, SXTB */
            case 5: /* UXTAB, UXTB */
                break;
            case 2: /* SXTAB16, SXTB16 */
            case 3: /* UXTAB16, UXTB16 */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
                break;
            default:
                goto illegal_op;
            }
            if (rn != 15) {
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
            }
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
            if (shift != 0)
                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default:
                g_assert_not_reached();
            }
            if (rn != 15) {
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
            }
            store_reg(s, rd, tmp);
            break;
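            /*
             * Informative example: UXTAH r0, r1, r2, ROR #8 arrives here
             * with shift = 1, so r2 is rotated right by 8 bits before
             * the 16-bit zero-extension and the accumulate with r1.
             */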
        case 2: /* SIMD add/subtract.  */
            if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                goto illegal_op;
            }
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
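            /*
             * Informative note (my reading of the helper, not from the
             * original source): gen_thumb2_parallel_addsub() picks its
             * helper from the (op, shift) pair; op selects the signed,
             * saturating, halving or unsigned family and shift selects
             * the 8-bit/16-bit add, subtract or exchange form.
             */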
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            if (op < 4) {
                /* Saturating add/subtract.  */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if (op & 1)
                    gen_helper_double_saturate(tmp, cpu_env, tmp);
                if (op & 2)
                    gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
                else
                    gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                switch (op) {
                case 0x0a: /* rbit */
                case 0x08: /* rev */
                case 0x09: /* rev16 */
                case 0x0b: /* revsh */
                case 0x18: /* clz */
                    break;
                case 0x10: /* sel */
                    if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                        goto illegal_op;
                    }
                    break;
                case 0x20: /* crc32/crc32c */
                case 0x21:
                case 0x22:
                case 0x28:
                case 0x29:
                case 0x2a:
                    if (!dc_isar_feature(aa32_crc32, s)) {
                        goto illegal_op;
                    }
                    break;
                default:
                    goto illegal_op;
                }
                tmp = load_reg(s, rn);
                switch (op) {
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                    break;
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                    break;
                case 0x09: /* rev16 */
                    gen_rev16(tmp);
                    break;
                case 0x0b: /* revsh */
                    gen_revsh(tmp);
                    break;
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    break;
                case 0x18: /* clz */
                    tcg_gen_clzi_i32(tmp, tmp, 32);
                    break;
                case 0x20:
                case 0x21:
                case 0x22:
                case 0x28:
                case 0x29:
                case 0x2a:
                {
                    /* crc32/crc32c */
                    uint32_t sz = op & 0x3;
                    uint32_t c = op & 0x8;

                    tmp2 = load_reg(s, rm);
                    if (sz == 0) {
                        tcg_gen_andi_i32(tmp2, tmp2, 0xff);
                    } else if (sz == 1) {
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
                    }
                    tmp3 = tcg_const_i32(1 << sz);
                    if (c) {
                        gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
                    } else {
                        gen_helper_crc32(tmp, tmp, tmp2, tmp3);
                    }
                    tcg_temp_free_i32(tmp2);
                    tcg_temp_free_i32(tmp3);
                    break;
                }
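                /*
                 * Informative note: the tcg_const_i32(1 << sz) above
                 * passes the operand width in bytes (1, 2 or 4) to the
                 * crc32 helpers, so e.g. CRC32H folds in exactly the
                 * two low bytes of Rm that were masked just before.
                 */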
                default:
                    g_assert_not_reached();
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
            case 7: /* Unsigned sum of absolute differences.  */
                break;
            case 1: /* 16 x 16 -> 32 */
            case 2: /* Dual multiply add.  */
            case 3: /* 32 * 16 -> 32msb */
            case 4: /* Dual multiply subtract.  */
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
                break;
            }
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (op)
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    else
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                if (op)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (insn & (1 << 22)) {
                    /* This subtraction cannot overflow.  */
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 3: /* 32 * 16 -> 32msb */
                if (op)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                if (rs != 15) {
                    tmp = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    } else {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                    }
                }
                if (insn & (1 << 4)) {
                    tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                }
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                break;
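            /*
             * Informative example: with bit 4 set (the SMMULR/SMMLAR/
             * SMMLSR forms) the 0x80000000 bias added above rounds the
             * product to nearest before the high word is taken, so a
             * 64-bit value of 0x00000001_80000000 yields 2 rather than
             * the truncated 1.
             */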
            case 7: /* Unsigned sum of absolute differences.  */
                gen_helper_usad8(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!dc_isar_feature(thumb_div, s)) {
                    goto illegal_op;
                }
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    tcg_temp_free_i32(tmp);
                    tcg_temp_free_i32(tmp2);
                    goto illegal_op;
                }
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                /* BUGFIX */
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_i32(tmp);
                            goto illegal_op;
                        }
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        tcg_temp_free_i32(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                        tcg_temp_free_i64(tmp64);
                        goto illegal_op;
                    }
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
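                    /*
                     * Informative note: the two gen_addq_lo() calls above
                     * give UMAAL its RdHi:RdLo = Rn*Rm + RdHi + RdLo
                     * semantics; even with all inputs at 0xffffffff the
                     * sum is exactly 2^64 - 1, so it cannot overflow.
                     */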
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            }
            break;
        }
        break;
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            /* We don't currently implement M profile FP support,
             * so this entire space should give a NOCP fault, with
             * the exception of the v8M VLLDM and VLSTM insns, which
             * must be NOPs in Secure state and UNDEF in Nonsecure state.
             */
            if (arm_dc_feature(s, ARM_FEATURE_V8) &&
                (insn & 0xffa00f00) == 0xec200a00) {
                /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
                 * - VLLDM, VLSTM
                 * We choose to UNDEF if the RAZ bits are non-zero.
                 */
                if (!s->v8m_secure || (insn & 0x0040f0ff)) {
                    goto illegal_op;
                }
                /* Just NOP since FP support is not implemented */
                break;
            }
            /* All other insns: NOCP */
            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
                               default_exception_el(s));
            break;
        }
        if ((insn & 0xfe000a00) == 0xfc000800
            && arm_dc_feature(s, ARM_FEATURE_V8)) {
            /* The Thumb2 and ARM encodings are identical.  */
            if (disas_neon_insn_3same_ext(s, insn)) {
                goto illegal_op;
            }
        } else if ((insn & 0xff000a00) == 0xfe000800
                   && arm_dc_feature(s, ARM_FEATURE_V8)) {
            /* The Thumb2 and ARM encodings are identical.  */
            if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
                goto illegal_op;
            }
        } else if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(s, insn)) {
                goto illegal_op;
            }
        } else if (((insn >> 8) & 0xe) == 10) {
            if (disas_vfp_insn(s, insn)) {
                goto illegal_op;
            }
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn(s, insn)) {
                goto illegal_op;
            }
        }
        break;
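    /*
     * Informative note on the Neon re-encoding above: it maps the Thumb
     * top byte 0xef/0xff to the ARM unconditional form 0xf2/0xf3, i.e.
     * it moves the U bit from insn[28] down to insn[24] and restores
     * the 0xf prefix that disas_neon_data_insn() expects.
     */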
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
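                /*
                 * Informative aside: these two XORs implement the
                 * I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S) unscrambling
                 * from the architecture manual; the sign extension above
                 * already copied S into offset[23:22], and S XOR NOT(J)
                 * equals NOT(J EOR S).
                 */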

                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    if (arm_dc_feature(s, ARM_FEATURE_M)) {
                        goto illegal_op;
                    }
                    if (!(insn & (1 << 20))) {
                        /* Hypervisor call (v7) */
                        int imm16 = extract32(insn, 16, 4) << 12
                            | extract32(insn, 0, 12);
                        ARCH(7);
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        gen_hvc(s, imm16);
                    } else {
                        /* Secure monitor call (v6+) */
                        ARCH(6K);
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        gen_smc(s);
                    }
                } else {
                    op = (insn >> 20) & 7;
                    switch (op) {
                    case 0: /* msr cpsr.  */
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            tmp = load_reg(s, rn);
                            /* the constant is the mask and SYSm fields */
                            addr = tcg_const_i32(insn & 0xfff);
                            gen_helper_v7m_msr(cpu_env, addr, tmp);
                            tcg_temp_free_i32(addr);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        }
                        /* fall through */
                    case 1: /* msr spsr.  */
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }

                        if (extract32(insn, 5, 1)) {
                            /* MSR (banked) */
                            int sysm = extract32(insn, 8, 4) |
                                (extract32(insn, 4, 1) << 4);
                            int r = op & 1;

                            gen_msr_banked(s, r, sysm, rm);
                            break;
                        }

                        /* MSR (for PSRs) */
                        tmp = load_reg(s, rn);
                        if (gen_set_psr(s,
                                        msr_mask(s, (insn >> 8) & 0xf, op == 1),
                                        op == 1, tmp))
                            goto illegal_op;
                        break;
                    case 2: /* cps, nop-hint.  */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                        }
                        /* Implemented as NOP in user mode.  */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_set_psr_im(s, offset, 0, imm);
                        }
                        break;
                    case 3: /* Special control operations.  */
                        if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
                            !arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }
                        op = (insn >> 4) & 0xf;
                        switch (op) {
                        case 2: /* clrex */
                            gen_clrex(s);
                            break;
                        case 4: /* dsb */
                        case 5: /* dmb */
                            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
                            break;
                        case 6: /* isb */
                            /* We need to break the TB after this insn
                             * to execute self-modifying code correctly
                             * and also to take any pending interrupts
                             * immediately.
                             */
                            gen_goto_tb(s, 0, s->pc & ~1);
                            break;
                        default:
                            goto illegal_op;
                        }
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.
                         * This instruction doesn't exist at all for
                         * M-profile.
                         */
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rn);
                        gen_bx(s, tmp);
                        break;
                    case 5: /* Exception return.  */
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        if (rn != 14 || rd != 15) {
                            goto illegal_op;
                        }
                        if (s->current_el == 2) {
                            /* ERET from Hyp uses ELR_Hyp, not LR */
                            if (insn & 0xff) {
                                goto illegal_op;
                            }
                            tmp = load_cpu_field(elr_el[2]);
                        } else {
                            tmp = load_reg(s, rn);
                            tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                        }
                        gen_exception_return(s, tmp);
                        break;
                    case 6: /* MRS (CPSR/special register) */
                        if (extract32(insn, 5, 1) &&
                            !arm_dc_feature(s, ARM_FEATURE_M)) {
                            /* MRS (banked) */
                            int sysm = extract32(insn, 16, 4) |
                                (extract32(insn, 4, 1) << 4);

                            gen_mrs_banked(s, 0, sysm, rd);
                            break;
                        }

                        if (extract32(insn, 16, 4) != 0xf) {
                            goto illegal_op;
                        }
                        if (!arm_dc_feature(s, ARM_FEATURE_M) &&
                            extract32(insn, 0, 8) != 0) {
                            goto illegal_op;
                        }

                        /* mrs cpsr */
                        tmp = tcg_temp_new_i32();
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_mrs(tmp, cpu_env, addr);
                            tcg_temp_free_i32(addr);
                        } else {
                            gen_helper_cpsr_read(tmp, cpu_env);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 7: /* MRS (SPSR) */
                        if (extract32(insn, 5, 1) &&
                            !arm_dc_feature(s, ARM_FEATURE_M)) {
                            /* MRS (banked) */
                            int sysm = extract32(insn, 16, 4) |
                                (extract32(insn, 4, 1) << 4);

                            gen_mrs_banked(s, 1, sysm, rd);
                            break;
                        }

                        /* mrs spsr.  */
                        /* Not accessible in user mode.  */
                        if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }

                        if (extract32(insn, 16, 4) != 0xf ||
                            extract32(insn, 0, 8) != 0) {
                            goto illegal_op;
                        }

                        tmp = load_cpu_field(spsr);
                        store_reg(s, rd, tmp);
                        break;
                    }
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                arm_skip_unless(s, op);

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            }
        } else {
            /*
             * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
             *  - Data-processing (modified immediate, plain binary immediate)
             */
            if (insn & (1 << 25)) {
                /*
                 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
                 *  - Data-processing (plain binary immediate)
                 */
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32) {
                            tcg_gen_sextract_i32(tmp, tmp, shift, imm);
                        }
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32) {
                            tcg_gen_extract_i32(tmp, tmp, shift, imm);
                        }
                        break;
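                    /*
                     * Informative note: for these extracts 'shift' is the
                     * lsb and 'imm' the field width, so UBFX r0, r1, #4, #8
                     * reaches this point as a single
                     * tcg_gen_extract_i32(tmp, tmp, 4, 8).
                     */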
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
                            tcg_temp_free_i32(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0) {
                                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                                    tcg_temp_free_i32(tmp);
                                    tcg_temp_free_i32(tmp2);
                                    goto illegal_op;
                                }
                                gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                            }
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0) {
                                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                                    tcg_temp_free_i32(tmp);
                                    tcg_temp_free_i32(tmp2);
                                    goto illegal_op;
                                }
                                gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                            }
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                        store_reg(s, rd, tmp);
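                        /*
                         * Informative note: a MOVW/MOVT pair is the usual
                         * way to build a 32-bit constant:
                         *     movw r0, #0x5678   @ r0 = 0x00005678
                         *     movt r0, #0x1234   @ r0 = 0x12345678
                         * matching the ext16u-then-or sequence above.
                         */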
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                            store_reg(s, rd, tmp);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                            if (rn == 13 && rd == 13) {
                                /* ADD SP, SP, imm or SUB SP, SP, imm */
                                store_sp_checked(s, tmp);
                            } else {
                                store_reg(s, rd, tmp);
                            }
                        }
                    }
                }
            } else {
                /*
                 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
                 *  - Data-processing (modified immediate)
                 */
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm |= imm << 16;
                    imm <<= 8;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
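                /*
                 * Worked example (informative only): payload 0xa5 with
                 * control field 3 duplicates the byte into every lane
                 * (0xa5a5a5a5), while control field 0b0100 with payload
                 * 0x25 takes the rotated-constant path: bit 7 is forced
                 * (0x25 -> 0xa5) and the byte is rotated to the top,
                 * giving 0xa5000000.
                 */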
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd == 13 && rn == 13
                    && (op == 8 || op == 13)) {
                    /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
                    store_sp_checked(s, tmp);
                } else if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
        }
        break;
    case 12: /* Load/store single data item.  */
    {
        int postinc = 0;
        int writeback = 0;
        int memidx;
        ISSInfo issinfo;

        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(s, insn)) {
                goto illegal_op;
            }
            break;
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (rs == 15) {
            if (!(insn & (1 << 20))) {
                goto illegal_op;
            }
            if (op != 2) {
                /* Byte or halfword load space with dest == r15: memory hints.
                 * Catch them early so we don't emit pointless addressing code.
                 * This space is a mix of:
                 *  PLD/PLDW/PLI, which we implement as NOPs (note that unlike
                 *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
                 *     cores)
                 *  unallocated hints, which must be treated as NOPs
                 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
                 *     which is easiest for the decoding logic
                 *  Some space which must UNDEF
                 */
                int op1 = (insn >> 23) & 3;
                int op2 = (insn >> 6) & 0x3f;
                if (op & 2) {
                    goto illegal_op;
                }
                if (rn == 15) {
                    /* UNPREDICTABLE, unallocated hint or
                     * PLD/PLDW/PLI (literal)
                     */
                    return;
                }
                if (op1 & 1) {
                    return; /* PLD/PLDW/PLI or unallocated hint */
                }
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return; /* PLD/PLDW/PLI or unallocated hint */
                }
                /* UNDEF space, or an UNPREDICTABLE */
                goto illegal_op;
            }
        }
        memidx = get_mem_index(s);
        if (rn == 15) {
            addr = tcg_temp_new_i32();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                imm = insn & 0xff;
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3) {
                        tcg_temp_free_i32(addr);
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    break;
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    memidx = get_a32_user_mem_index(s);
                    break;
                case 0x9: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xb: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 0xd: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xf: /* Pre-increment.  */
                    writeback = 1;
                    break;
                default:
                    tcg_temp_free_i32(addr);
                    goto illegal_op;
                }
            }
        }
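        /*
         * Informative example: "ldr r0, [r1], #4" (post-increment) gets
         * here with imm = 4 and postinc = writeback = 1, so the load
         * below uses the original address and the final addi/store_reg
         * pair performs the base-register writeback.
         */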

        issinfo = writeback ? ISSInvalid : rs;

        if (s->v8m_stackcheck && rn == 13 && writeback) {
            /*
             * Stackcheck. Here we know 'addr' is the current SP;
             * if imm is +ve we're moving SP up, else down. It is
             * UNKNOWN whether the limit check triggers when SP starts
             * below the limit and ends up above it; we choose to
             * trigger it.
             */
            if ((int32_t)imm < 0) {
                TCGv_i32 newsp = tcg_temp_new_i32();

                tcg_gen_addi_i32(newsp, addr, imm);
                gen_helper_v8m_stackcheck(cpu_env, newsp);
                tcg_temp_free_i32(newsp);
            } else {
                gen_helper_v8m_stackcheck(cpu_env, addr);
            }
        }

        if (writeback && !postinc) {
            tcg_gen_addi_i32(addr, addr, imm);
        }

        if (insn & (1 << 20)) {
            /* Load.  */
            tmp = tcg_temp_new_i32();
            switch (op) {
            case 0:
                gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 4:
                gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 1:
                gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 5:
                gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 2:
                gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
                break;
            default:
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            if (rs == 15) {
                gen_bx_excret(s, tmp);
            } else {
                store_reg(s, rs, tmp);
            }
        } else {
            /* Store.  */
            tmp = load_reg(s, rs);
            switch (op) {
            case 0:
                gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 1:
                gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 2:
                gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
                break;
            default:
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            tcg_temp_free_i32(tmp);
        }
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
    }
    break;
    default:
        goto illegal_op;
    }
    return;
illegal_op:
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

296e5a0a 11821static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 11822{
296e5a0a 11823 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
11824 int32_t offset;
11825 int i;
39d5492a
PM
11826 TCGv_i32 tmp;
11827 TCGv_i32 tmp2;
11828 TCGv_i32 addr;
99c475ab 11829
99c475ab
FB
11830 switch (insn >> 12) {
11831 case 0: case 1:
396e467c 11832
99c475ab
FB
11833 rd = insn & 7;
11834 op = (insn >> 11) & 3;
11835 if (op == 3) {
a2d12f0f
PM
11836 /*
11837 * 0b0001_1xxx_xxxx_xxxx
11838 * - Add, subtract (three low registers)
11839 * - Add, subtract (two low registers and immediate)
11840 */
99c475ab 11841 rn = (insn >> 3) & 7;
396e467c 11842 tmp = load_reg(s, rn);
99c475ab
FB
11843 if (insn & (1 << 10)) {
11844 /* immediate */
7d1b0095 11845 tmp2 = tcg_temp_new_i32();
396e467c 11846 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11847 } else {
11848 /* reg */
11849 rm = (insn >> 6) & 7;
396e467c 11850 tmp2 = load_reg(s, rm);
99c475ab 11851 }
9ee6e8bb
PB
11852 if (insn & (1 << 9)) {
11853 if (s->condexec_mask)
396e467c 11854 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11855 else
72485ec4 11856 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11857 } else {
11858 if (s->condexec_mask)
396e467c 11859 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11860 else
72485ec4 11861 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11862 }
7d1b0095 11863 tcg_temp_free_i32(tmp2);
396e467c 11864 store_reg(s, rd, tmp);
99c475ab
FB
11865 } else {
11866 /* shift immediate */
11867 rm = (insn >> 3) & 7;
11868 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11869 tmp = load_reg(s, rm);
11870 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11871 if (!s->condexec_mask)
11872 gen_logic_CC(tmp);
11873 store_reg(s, rd, tmp);
99c475ab
FB
11874 }
11875 break;
11876 case 2: case 3:
a2d12f0f
PM
11877 /*
11878 * 0b001x_xxxx_xxxx_xxxx
11879 * - Add, subtract, compare, move (one low register and immediate)
11880 */
99c475ab
FB
11881 op = (insn >> 11) & 3;
11882 rd = (insn >> 8) & 0x7;
396e467c 11883 if (op == 0) { /* mov */
7d1b0095 11884 tmp = tcg_temp_new_i32();
396e467c 11885 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11886 if (!s->condexec_mask)
396e467c
FN
11887 gen_logic_CC(tmp);
11888 store_reg(s, rd, tmp);
11889 } else {
11890 tmp = load_reg(s, rd);
7d1b0095 11891 tmp2 = tcg_temp_new_i32();
396e467c
FN
11892 tcg_gen_movi_i32(tmp2, insn & 0xff);
11893 switch (op) {
11894 case 1: /* cmp */
72485ec4 11895 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11896 tcg_temp_free_i32(tmp);
11897 tcg_temp_free_i32(tmp2);
396e467c
FN
11898 break;
11899 case 2: /* add */
11900 if (s->condexec_mask)
11901 tcg_gen_add_i32(tmp, tmp, tmp2);
11902 else
72485ec4 11903 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11904 tcg_temp_free_i32(tmp2);
396e467c
FN
11905 store_reg(s, rd, tmp);
11906 break;
11907 case 3: /* sub */
11908 if (s->condexec_mask)
11909 tcg_gen_sub_i32(tmp, tmp, tmp2);
11910 else
72485ec4 11911 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11912 tcg_temp_free_i32(tmp2);
396e467c
FN
11913 store_reg(s, rd, tmp);
11914 break;
11915 }
99c475ab 11916 }
99c475ab
FB
11917 break;
11918 case 4:
11919 if (insn & (1 << 11)) {
11920 rd = (insn >> 8) & 7;
5899f386
FB
11921 /* load pc-relative. Bit 1 of PC is ignored. */
11922 val = s->pc + 2 + ((insn & 0xff) * 4);
11923 val &= ~(uint32_t)2;
7d1b0095 11924 addr = tcg_temp_new_i32();
b0109805 11925 tcg_gen_movi_i32(addr, val);
c40c8556 11926 tmp = tcg_temp_new_i32();
9bb6558a
PM
11927 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11928 rd | ISSIs16Bit);
7d1b0095 11929 tcg_temp_free_i32(addr);
b0109805 11930 store_reg(s, rd, tmp);
99c475ab
FB
11931 break;
11932 }
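        /*
         * Informative note on the pc-relative load above: in Thumb state
         * PC reads as the insn address + 4, and s->pc has already moved
         * past this 2-byte insn, hence the "+ 2"; "ldr r0, [pc, #8]" at
         * 0x1000 therefore loads from Align(0x1004, 4) + 8 = 0x100c.
         */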
        if (insn & (1 << 10)) {
            /* 0b0100_01xx_xxxx_xxxx
             * - data processing extended, branch and exchange
             */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rd == 13) {
                    /* ADD SP, SP, reg */
                    store_sp_checked(s, tmp);
                } else {
                    store_reg(s, rd, tmp);
                }
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                if (rd == 13) {
                    /* MOV SP, reg */
                    store_sp_checked(s, tmp);
                } else {
                    store_reg(s, rd, tmp);
                }
                break;
            case 3:
            {
                /* 0b0100_0111_xxxx_xxxx
                 * - branch [and link] exchange thumb register
                 */
                bool link = insn & (1 << 7);

                if (insn & 3) {
                    goto undef;
                }
                if (link) {
                    ARCH(5);
                }
                if ((insn & 4)) {
                    /* BXNS/BLXNS: only exists for v8M with the
                     * security extensions, and always UNDEF if NonSecure.
                     * We don't implement these in the user-only mode
                     * either (in theory you can use them from Secure User
                     * mode but they are too tied in to system emulation.)
                     */
                    if (!s->v8m_secure || IS_USER_ONLY) {
                        goto undef;
                    }
                    if (link) {
                        gen_blxns(s, rm);
                    } else {
                        gen_bxns(s, rm);
                    }
                    break;
                }
                /* BLX/BX */
                tmp = load_reg(s, rm);
                if (link) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                    gen_bx(s, tmp);
                } else {
                    /* Only BX works as exception-return, not BLX */
                    gen_bx_excret(s, tmp);
                }
                break;
            }
            }
            break;
        }

        /*
         * 0b0100_00xx_xxxx_xxxx
         *  - Data-processing (two low registers)
         */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }
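        /*
         * Informative note: for LSL Rdn, Rm and friends the Thumb
         * encoding keeps the value in Rdn and the amount in Rm, the
         * reverse of how the shift generators below consume (tmp2, tmp);
         * swapping rd and rm here, with val = 1, makes the result land
         * back in the original Rdn when it is stored via rm at the end.
         */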

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            tmp = NULL;
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;

    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
        } else {
            tmp = tcg_temp_new_i32();
        }

        switch (op) {
        case 0: /* str */
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 1: /* strh */
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 2: /* strb */
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 3: /* ldrsb */
            gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 4: /* ldr */
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 5: /* ldrh */
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 6: /* ldrb */
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 7: /* ldrsh */
            gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        }
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
        } else {
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /*
         * 0b1010_xxxx_xxxx_xxxx
         *  - Add PC/SP (immediate)
         */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /*
             * 0b1011_0000_xxxx_xxxx
             *  - ADD (SP plus immediate)
             *  - SUB (SP minus immediate)
             */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_sp_checked(s, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /*
             * 0b1011_x10x_xxxx_xxxx
             *  - push/pop
             */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }

            if (s->v8m_stackcheck) {
                /*
                 * Here 'addr' is the lower of "old SP" and "new SP";
                 * if this is a pop that starts below the limit and ends
                 * above it, it is UNKNOWN whether the limit check
                 * triggers; we choose to trigger.
                 */
                gen_helper_v8m_stackcheck(cpu_env, addr);
            }

            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            tmp = NULL;
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
            }
            break;

        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            arm_gen_condlabel(s);
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
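            /*
             * Informative note: CBZ/CBNZ can only branch forwards; the
             * i:imm5 field assembled above gives an offset of 0..126
             * bytes, and the comparison never touches the condition
             * flags.
             */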

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
            break;
        }

        case 0xa: /* rev, and hlt */
        {
            int op1 = extract32(insn, 6, 2);

            if (op1 == 2) {
                /* HLT */
                int imm6 = extract32(insn, 0, 6);

                gen_hlt(s, imm6);
                break;
            }

            /* Otherwise this is rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch (op1) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default:
                g_assert_not_reached();
            }
            store_reg(s, rd, tmp);
            break;
        }

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
                    gen_helper_setend(cpu_env);
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
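                    /*
                     * Informative note: 16 and 19 above are the SYSm
                     * encodings of PRIMASK and FAULTMASK, the same
                     * numbering the MSR/MRS special-register space uses.
                     */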
12513 } else {
12514 if (insn & (1 << 4)) {
12515 shift = CPSR_A | CPSR_I | CPSR_F;
12516 } else {
12517 shift = 0;
12518 }
12519 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 12520 }
12521 break;
12522 default:
12523 goto undef;
12524 }
12525 break;
12526
12527 default:
12528 goto undef;
12529 }
12530 break;
12531
12532 case 12:
a7d3970d 12533 {
99c475ab 12534 /* load/store multiple */
f764718d 12535 TCGv_i32 loaded_var = NULL;
99c475ab 12536 rn = (insn >> 8) & 0x7;
b0109805 12537 addr = load_reg(s, rn);
12538 for (i = 0; i < 8; i++) {
12539 if (insn & (1 << i)) {
12540 if (insn & (1 << 11)) {
12541 /* load */
c40c8556 12542 tmp = tcg_temp_new_i32();
12dcc321 12543 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12544 if (i == rn) {
12545 loaded_var = tmp;
12546 } else {
12547 store_reg(s, i, tmp);
12548 }
12549 } else {
12550 /* store */
b0109805 12551 tmp = load_reg(s, i);
12dcc321 12552 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12553 tcg_temp_free_i32(tmp);
99c475ab 12554 }
5899f386 12555 /* advance to the next address */
b0109805 12556 tcg_gen_addi_i32(addr, addr, 4);
12557 }
12558 }
b0109805 12559 if ((insn & (1 << rn)) == 0) {
a7d3970d 12560 /* base reg not in list: base register writeback */
12561 store_reg(s, rn, addr);
12562 } else {
12563 /* base reg in list: if load, complete it now */
12564 if (insn & (1 << 11)) {
12565 store_reg(s, rn, loaded_var);
12566 }
7d1b0095 12567 tcg_temp_free_i32(addr);
b0109805 12568 }
99c475ab 12569 break;
a7d3970d 12570 }
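/* Illustrative note (editor's addition): for "ldmia r2, {r1, r2}" the
 * base register is in the list, so the code above defers the load into
 * loaded_var, writes it to r2 only after all other loads, and performs
 * no base writeback -- the loaded value supersedes the updated address.
 */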
12571 case 13:
12572 /* conditional branch or swi */
12573 cond = (insn >> 8) & 0xf;
12574 if (cond == 0xe)
12575 goto undef;
12576
12577 if (cond == 0xf) {
12578 /* swi */
eaed129d 12579 gen_set_pc_im(s, s->pc);
d4a2dc67 12580 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 12581 s->base.is_jmp = DISAS_SWI;
12582 break;
12583 }
12584 /* generate a conditional jump to next instruction */
c2d9644e 12585 arm_skip_unless(s, cond);
12586
12587 /* jump to the offset */
5899f386 12588 val = (uint32_t)s->pc + 2;
99c475ab 12589 offset = ((int32_t)insn << 24) >> 24;
5899f386 12590 val += offset << 1;
8aaca4c0 12591 gen_jmp(s, val);
12592 break;
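/* Worked example (editor's illustration): "beq .-4" encodes as 0xd0fc;
 * the sign-extended imm8 is -4, so val = insn address + 4 + (-4 << 1),
 * i.e. the branch target is 4 bytes before the insn.
 */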
12593
12594 case 14:
358bf29e 12595 if (insn & (1 << 11)) {
12596 /* thumb_insn_is_16bit() ensures we can't get here for
12597 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
12598 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
12599 */
12600 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12601 ARCH(5);
12602 offset = ((insn & 0x7ff) << 1);
12603 tmp = load_reg(s, 14);
12604 tcg_gen_addi_i32(tmp, tmp, offset);
12605 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
12606
12607 tmp2 = tcg_temp_new_i32();
12608 tcg_gen_movi_i32(tmp2, s->pc | 1);
12609 store_reg(s, 14, tmp2);
12610 gen_bx(s, tmp);
12611 break;
12612 }
9ee6e8bb 12613 /* unconditional branch */
12614 val = (uint32_t)s->pc;
12615 offset = ((int32_t)insn << 21) >> 21;
12616 val += (offset << 1) + 2;
8aaca4c0 12617 gen_jmp(s, val);
12618 break;
12619
12620 case 15:
12621 /* thumb_insn_is_16bit() ensures we can't get here for
12622 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
12623 */
12624 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12625
12626 if (insn & (1 << 11)) {
12627 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
12628 offset = ((insn & 0x7ff) << 1) | 1;
12629 tmp = load_reg(s, 14);
12630 tcg_gen_addi_i32(tmp, tmp, offset);
12631
12632 tmp2 = tcg_temp_new_i32();
12633 tcg_gen_movi_i32(tmp2, s->pc | 1);
12634 store_reg(s, 14, tmp2);
12635 gen_bx(s, tmp);
12636 } else {
12637 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
12638 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
12639
12640 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
12641 }
9ee6e8bb 12642 break;
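/* Worked example (editor's illustration): a thumb1 "bl" targeting
 * pc_of_prefix + 4 + 0x1000 splits into prefix 0xf001, which sets
 * LR = s->pc + 2 + 0x1000 above, and suffix 0xf800, which branches to
 * LR + 1 (bit 0 keeps us in Thumb state) and rewrites LR to the return
 * address with bit 0 set.
 */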
12643 }
12644 return;
9ee6e8bb 12645illegal_op:
99c475ab 12646undef:
12647 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
12648 default_exception_el(s));
12649}
12650
12651static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
12652{
12653 /* Return true if the insn at dc->pc might cross a page boundary.
12654 * (False positives are OK, false negatives are not.)
12655 * We know this is a Thumb insn, and our caller ensures we are
12656 * only called if dc->pc is less than 4 bytes from the page
12657 * boundary, so we cross the page if the first 16 bits indicate
12658 * that this is a 32 bit insn.
541ebcd4 12659 */
5b8d7289 12660 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4 12661
5b8d7289 12662 return !thumb_insn_is_16bit(s, insn);
12663}
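/* Editor's sketch (not part of the original file): the property that
 * insn_crosses_page() relies on, as a standalone check.  The first
 * halfword of a 32-bit Thumb insn always has bits [15:11] equal to
 * 0b11101, 0b11110 or 0b11111; the real thumb_insn_is_16bit() elsewhere
 * in this file additionally treats the split BL/BLX halves as 16-bit
 * insns on pre-Thumb2 cores.
 */
static inline bool example_first_halfword_is_32bit(uint16_t insn)
{
    /* 0xe800, 0xf000 and 0xf800 are the three 32-bit prefixes */
    return (insn & 0xf800) >= 0xe800;
}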
12664
b542683d 12665static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2c0262af 12666{
1d8a5535 12667 DisasContext *dc = container_of(dcbase, DisasContext, base);
9c489ea6 12668 CPUARMState *env = cs->env_ptr;
4e5e1215 12669 ARMCPU *cpu = arm_env_get_cpu(env);
3b46e624 12670
962fcbf2 12671 dc->isar = &cpu->isar;
dcba3a8d 12672 dc->pc = dc->base.pc_first;
e50e6a20 12673 dc->condjmp = 0;
3926cc84 12674
40f860cd 12675 dc->aarch64 = 0;
12676 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
12677 * there is no secure EL1, so we route exceptions to EL3.
12678 */
12679 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
12680 !arm_el_is_aa64(env, 3);
12681 dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
12682 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
12683 dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
12684 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
12685 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
12686 dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
c1e37810 12687 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 12688#if !defined(CONFIG_USER_ONLY)
c1e37810 12689 dc->user = (dc->current_el == 0);
3926cc84 12690#endif
12691 dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
12692 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
12693 dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
12694 dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
12695 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
12696 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
12697 dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
12698 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
12699 regime_is_secure(env, dc->mmu_idx);
4730fb85 12700 dc->v8m_stackcheck = ARM_TBFLAG_STACKCHECK(dc->base.tb->flags);
60322b39 12701 dc->cp_regs = cpu->cp_regs;
a984e42c 12702 dc->features = env->features;
40f860cd 12703
12704 /* Single step state. The code-generation logic here is:
12705 * SS_ACTIVE == 0:
12706 * generate code with no special handling for single-stepping (except
12707 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
12708 * this happens anyway because those changes are all system register or
12709 * PSTATE writes).
12710 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
12711 * emit code for one insn
12712 * emit code to clear PSTATE.SS
12713 * emit code to generate software step exception for completed step
12714 * end TB (as usual for having generated an exception)
12715 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
12716 * emit code to generate a software step exception
12717 * end the TB
12718 */
12719 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
12720 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
12721 dc->is_ldex = false;
12722 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
12723
bfe7ad5b 12724 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
1d8a5535 12725
12726 /* If architectural single step active, limit to 1. */
12727 if (is_singlestepping(dc)) {
b542683d 12728 dc->base.max_insns = 1;
12729 }
12730
12731 /* ARM is a fixed-length ISA. Bound the number of insns to execute
12732 to those left on the page. */
12733 if (!dc->thumb) {
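/* Editor's note: TARGET_PAGE_MASK is a negative constant, so
 * -(pc_first | TARGET_PAGE_MASK) is the number of bytes left on this
 * page, and dividing by 4 bounds the number of whole A32 insns. */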
bfe7ad5b 12734 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
b542683d 12735 dc->base.max_insns = MIN(dc->base.max_insns, bound);
12736 }
12737
12738 cpu_F0s = tcg_temp_new_i32();
12739 cpu_F1s = tcg_temp_new_i32();
12740 cpu_F0d = tcg_temp_new_i64();
12741 cpu_F1d = tcg_temp_new_i64();
12742 cpu_V0 = cpu_F0d;
12743 cpu_V1 = cpu_F1d;
e677137d 12744 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 12745 cpu_M0 = tcg_temp_new_i64();
12746}
12747
12748static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
12749{
12750 DisasContext *dc = container_of(dcbase, DisasContext, base);
12751
12752 /* A note on handling of the condexec (IT) bits:
12753 *
12754 * We want to avoid the overhead of having to write the updated condexec
12755 * bits back to the CPUARMState for every instruction in an IT block. So:
12756 * (1) if the condexec bits are not already zero then we write
12757 * zero back into the CPUARMState now. This avoids complications trying
12758 * to do it at the end of the block. (For example if we don't do this
12759 * it's hard to identify whether we can safely skip writing condexec
12760 * at the end of the TB, which we definitely want to do for the case
12761 * where a TB doesn't do anything with the IT state at all.)
12762 * (2) if we are going to leave the TB then we call gen_set_condexec()
12763 * which will write the correct value into CPUARMState if zero is wrong.
12764 * This is done both for leaving the TB at the end, and for leaving
12765 * it because of an exception we know will happen, which is done in
12766 * gen_exception_insn(). The latter is necessary because we need to
12767 * leave the TB with the PC/IT state just prior to execution of the
12768 * instruction which caused the exception.
12769 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
12770 * then the CPUARMState will be wrong and we need to reset it.
12771 * This is handled in the same way as restoration of the
12772 * PC in these situations; we save the value of the condexec bits
12773 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
12774 * then uses this to restore them after an exception.
12775 *
12776 * Note that there are no instructions which can read the condexec
12777 * bits, and none which can write non-static values to them, so
12778 * we don't need to care about whether CPUARMState is correct in the
12779 * middle of a TB.
12780 */
12781
12782 /* Reset the conditional execution bits immediately. This avoids
12783 complications trying to do it at the end of the block. */
12784 if (dc->condexec_mask || dc->condexec_cond) {
12785 TCGv_i32 tmp = tcg_temp_new_i32();
12786 tcg_gen_movi_i32(tmp, 0);
12787 store_cpu_field(tmp, condexec_bits);
12788 }
12789}
12790
12791static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
12792{
12793 DisasContext *dc = container_of(dcbase, DisasContext, base);
12794
12795 tcg_gen_insn_start(dc->pc,
12796 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
12797 0);
15fa08f8 12798 dc->insn_start = tcg_last_op();
12799}
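/* Editor's note: the three words recorded above are the ones that
 * restore_state_to_opc() at the end of this file reads back as
 * data[0..2]; the third word starts out as zero and can be filled in
 * with a syndrome value through dc->insn_start when an insn needs it.
 */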
12800
12801static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
12802 const CPUBreakpoint *bp)
12803{
12804 DisasContext *dc = container_of(dcbase, DisasContext, base);
12805
12806 if (bp->flags & BP_CPU) {
12807 gen_set_condexec(dc);
12808 gen_set_pc_im(dc, dc->pc);
12809 gen_helper_check_breakpoints(cpu_env);
12810 /* End the TB early; it's likely not going to be executed */
12811 dc->base.is_jmp = DISAS_TOO_MANY;
12812 } else {
12813 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
12814 /* The address covered by the breakpoint must be
12815 included in [tb->pc, tb->pc + tb->size) in order
12816 to for it to be properly cleared -- thus we
12817 increment the PC here so that the logic setting
12818 tb->size below does the right thing. */
12819 /* TODO: Advance PC by correct instruction length to
12820 * avoid disassembler error messages */
12821 dc->pc += 2;
12822 dc->base.is_jmp = DISAS_NORETURN;
12823 }
12824
12825 return true;
12826}
12827
722ef0a5 12828static bool arm_pre_translate_insn(DisasContext *dc)
13189a90 12829{
12830#ifdef CONFIG_USER_ONLY
12831 /* Intercept jump to the magic kernel page. */
12832 if (dc->pc >= 0xffff0000) {
12833 /* We always get here via a jump, so we know we are not in a
12834 conditional execution block. */
12835 gen_exception_internal(EXCP_KERNEL_TRAP);
12836 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 12837 return true;
12838 }
12839#endif
12840
12841 if (dc->ss_active && !dc->pstate_ss) {
12842 /* Singlestep state is Active-pending.
12843 * If we're in this state at the start of a TB then either
12844 * a) we just took an exception to an EL which is being debugged
12845 * and this is the first insn in the exception handler
12846 * b) debug exceptions were masked and we just unmasked them
12847 * without changing EL (eg by clearing PSTATE.D)
12848 * In either case we're going to take a swstep exception in the
12849 * "did not step an insn" case, and so the syndrome ISV and EX
12850 * bits should be zero.
12851 */
12852 assert(dc->base.num_insns == 1);
12853 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
12854 default_exception_el(dc));
12855 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 12856 return true;
12857 }
12858
12859 return false;
12860}
13189a90 12861
d0264d86 12862static void arm_post_translate_insn(DisasContext *dc)
722ef0a5 12863{
12864 if (dc->condjmp && !dc->base.is_jmp) {
12865 gen_set_label(dc->condlabel);
12866 dc->condjmp = 0;
12867 }
13189a90 12868 dc->base.pc_next = dc->pc;
23169224 12869 translator_loop_temp_check(&dc->base);
12870}
12871
12872static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12873{
12874 DisasContext *dc = container_of(dcbase, DisasContext, base);
12875 CPUARMState *env = cpu->env_ptr;
12876 unsigned int insn;
12877
12878 if (arm_pre_translate_insn(dc)) {
12879 return;
12880 }
12881
12882 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
58803318 12883 dc->insn = insn;
12884 dc->pc += 4;
12885 disas_arm_insn(dc, insn);
12886
12887 arm_post_translate_insn(dc);
12888
12889 /* ARM is a fixed-length ISA. We performed the cross-page check
12890 in init_disas_context by adjusting max_insns. */
12891}
12892
12893static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12894{
12895 /* Return true if this Thumb insn is always unconditional,
12896 * even inside an IT block. This is true of only a very few
12897 * instructions: BKPT, HLT, and SG.
12898 *
12899 * A larger class of instructions are UNPREDICTABLE if used
12900 * inside an IT block; we do not need to detect those here, because
12901 * what we do by default (perform the cc check and update the IT
12902 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12903 * choice for those situations.
12904 *
12905 * insn is either a 16-bit or a 32-bit instruction; the two are
12906 * distinguishable because for the 16-bit case the top 16 bits
12907 * are zeroes, and that isn't a valid 32-bit encoding.
12908 */
12909 if ((insn & 0xffffff00) == 0xbe00) {
12910 /* BKPT */
12911 return true;
12912 }
12913
12914 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12915 !arm_dc_feature(s, ARM_FEATURE_M)) {
12916 /* HLT: v8A only. This is unconditional even when it is going to
12917 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12918 * For v7 cores this was a plain old undefined encoding and so
12919 * honours its cc check. (We might be using the encoding as
12920 * a semihosting trap, but we don't change the cc check behaviour
12921 * on that account, because a debugger connected to a real v7A
12922 * core and emulating semihosting traps by catching the UNDEF
12923 * exception would also only see cases where the cc check passed.
12924 * No guest code should be trying to do a HLT semihosting trap
12925 * in an IT block anyway.
12926 */
12927 return true;
12928 }
12929
12930 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12931 arm_dc_feature(s, ARM_FEATURE_M)) {
12932 /* SG: v8M only */
12933 return true;
12934 }
12935
12936 return false;
12937}
12938
12939static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12940{
12941 DisasContext *dc = container_of(dcbase, DisasContext, base);
12942 CPUARMState *env = cpu->env_ptr;
12943 uint32_t insn;
12944 bool is_16bit;
12945
12946 if (arm_pre_translate_insn(dc)) {
12947 return;
12948 }
12949
12950 insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
12951 is_16bit = thumb_insn_is_16bit(dc, insn);
12952 dc->pc += 2;
12953 if (!is_16bit) {
12954 uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
12955
12956 insn = insn << 16 | insn2;
12957 dc->pc += 2;
12958 }
58803318 12959 dc->insn = insn;
296e5a0a 12960
dcf14dfb 12961 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
12962 uint32_t cond = dc->condexec_cond;
12963
12964 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
c2d9644e 12965 arm_skip_unless(dc, cond);
12966 }
12967 }
12968
12969 if (is_16bit) {
12970 disas_thumb_insn(dc, insn);
12971 } else {
2eea841c 12972 disas_thumb2_insn(dc, insn);
296e5a0a 12973 }
12974
12975 /* Advance the Thumb condexec condition. */
12976 if (dc->condexec_mask) {
12977 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
12978 ((dc->condexec_mask >> 4) & 1));
12979 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
12980 if (dc->condexec_mask == 0) {
12981 dc->condexec_cond = 0;
12982 }
12983 }
12984
12985 arm_post_translate_insn(dc);
12986
12987 /* Thumb is a variable-length ISA. Stop translation when the next insn
12988 * will touch a new page. This ensures that prefetch aborts occur at
12989 * the right place.
12990 *
12991 * We want to stop the TB if the next insn starts in a new page,
12992 * or if it spans between this page and the next. This means that
12993 * if we're looking at the last halfword in the page we need to
12994 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12995 * or a 32-bit Thumb insn (which won't).
12996 * This is to avoid generating a silly TB with a single 16-bit insn
12997 * in it at the end of this page (which would execute correctly
12998 * but isn't very efficient).
12999 */
13000 if (dc->base.is_jmp == DISAS_NEXT
13001 && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
13002 || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
13003 && insn_crosses_page(env, dc)))) {
13004 dc->base.is_jmp = DISAS_TOO_MANY;
13005 }
13006}
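/* Editor's sketch (not used by the translator): the condexec state
 * machine advanced at the end of thumb_tr_translate_insn() above, as a
 * standalone function.  Starting from the values the IT insn deposits
 * (e.g. cond = 0x0, mask = 0x0c for "ite eq"), each call returns the
 * condition to apply to the next insn of the block; the block is over
 * once mask reaches zero.
 */
static inline uint32_t example_it_advance(uint32_t *cond, uint32_t *mask)
{
    /* Bit 4 of the mask carries the low bit of the next condition. */
    *cond = (*cond & 0xe) | ((*mask >> 4) & 1);
    *mask = (*mask << 1) & 0x1f;
    if (*mask == 0) {
        *cond = 0; /* end of the IT block */
    }
    return *cond;
}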
13007
70d3c035 13008static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
1d8a5535 13009{
70d3c035 13010 DisasContext *dc = container_of(dcbase, DisasContext, base);
2e70f6ef 13011
c5a49c63 13012 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
13013 /* FIXME: This can theoretically happen with self-modifying code. */
13014 cpu_abort(cpu, "IO on conditional branch instruction");
2e70f6ef 13015 }
9ee6e8bb 13016
b5ff1b31 13017 /* At this stage dc->condjmp will only be set when the skipped
13018 instruction was a conditional branch or trap, and the PC has
13019 already been written. */
f021b2c4 13020 gen_set_condexec(dc);
dcba3a8d 13021 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
13022 /* Exception return branches need some special case code at the
13023 * end of the TB, which is complex enough that it has to
13024 * handle the single-step vs not and the condition-failed
13025 * insn codepath itself.
13026 */
13027 gen_bx_excret_final_code(dc);
13028 } else if (unlikely(is_singlestepping(dc))) {
7999a5c8 13029 /* Unconditional and "condition passed" instruction codepath. */
dcba3a8d 13030 switch (dc->base.is_jmp) {
7999a5c8 13031 case DISAS_SWI:
50225ad0 13032 gen_ss_advance(dc);
13033 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
13034 default_exception_el(dc));
13035 break;
13036 case DISAS_HVC:
37e6456e 13037 gen_ss_advance(dc);
73710361 13038 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
13039 break;
13040 case DISAS_SMC:
37e6456e 13041 gen_ss_advance(dc);
73710361 13042 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
13043 break;
13044 case DISAS_NEXT:
a68956ad 13045 case DISAS_TOO_MANY:
13046 case DISAS_UPDATE:
13047 gen_set_pc_im(dc, dc->pc);
13048 /* fall through */
13049 default:
13050 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
13051 gen_singlestep_exception(dc);
13052 break;
13053 case DISAS_NORETURN:
13054 break;
7999a5c8 13055 }
8aaca4c0 13056 } else {
13057 /* While branches must always occur at the end of an IT block,
13058 there are a few other things that can cause us to terminate
65626741 13059 the TB in the middle of an IT block:
13060 - Exception generating instructions (bkpt, swi, undefined).
13061 - Page boundaries.
13062 - Hardware watchpoints.
13063 Hardware breakpoints have already been handled and skip this code.
13064 */
dcba3a8d 13065 switch (dc->base.is_jmp) {
8aaca4c0 13066 case DISAS_NEXT:
a68956ad 13067 case DISAS_TOO_MANY:
6e256c93 13068 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0 13069 break;
577bf808 13070 case DISAS_JUMP:
13071 gen_goto_ptr();
13072 break;
13073 case DISAS_UPDATE:
13074 gen_set_pc_im(dc, dc->pc);
13075 /* fall through */
577bf808 13076 default:
8aaca4c0 13077 /* indicate that the hash table must be used to find the next TB */
07ea28b4 13078 tcg_gen_exit_tb(NULL, 0);
8aaca4c0 13079 break;
a0c231e6 13080 case DISAS_NORETURN:
13081 /* nothing more to generate */
13082 break;
9ee6e8bb 13083 case DISAS_WFI:
13084 {
13085 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
13086 !(dc->insn & (1U << 31))) ? 2 : 4);
13087
13088 gen_helper_wfi(cpu_env, tmp);
13089 tcg_temp_free_i32(tmp);
13090 /* The helper doesn't necessarily throw an exception, but we
13091 * must go back to the main loop to check for interrupts anyway.
13092 */
07ea28b4 13093 tcg_gen_exit_tb(NULL, 0);
9ee6e8bb 13094 break;
58803318 13095 }
13096 case DISAS_WFE:
13097 gen_helper_wfe(cpu_env);
13098 break;
13099 case DISAS_YIELD:
13100 gen_helper_yield(cpu_env);
13101 break;
9ee6e8bb 13102 case DISAS_SWI:
13103 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
13104 default_exception_el(dc));
9ee6e8bb 13105 break;
37e6456e 13106 case DISAS_HVC:
73710361 13107 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
13108 break;
13109 case DISAS_SMC:
73710361 13110 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
37e6456e 13111 break;
8aaca4c0 13112 }
13113 }
13114
13115 if (dc->condjmp) {
13116 /* "Condition failed" instruction codepath for the branch/trap insn */
13117 gen_set_label(dc->condlabel);
13118 gen_set_condexec(dc);
b636649f 13119 if (unlikely(is_singlestepping(dc))) {
13120 gen_set_pc_im(dc, dc->pc);
13121 gen_singlestep_exception(dc);
13122 } else {
6e256c93 13123 gen_goto_tb(dc, 1, dc->pc);
e50e6a20 13124 }
2c0262af 13125 }
13126
13127 /* Functions above can change dc->pc, so re-align dc->base.pc_next */
13128 dc->base.pc_next = dc->pc;
13129}
13130
13131static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
13132{
13133 DisasContext *dc = container_of(dcbase, DisasContext, base);
13134
13135 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
1d48474d 13136 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
13137}
13138
13139static const TranslatorOps arm_translator_ops = {
13140 .init_disas_context = arm_tr_init_disas_context,
13141 .tb_start = arm_tr_tb_start,
13142 .insn_start = arm_tr_insn_start,
13143 .breakpoint_check = arm_tr_breakpoint_check,
13144 .translate_insn = arm_tr_translate_insn,
13145 .tb_stop = arm_tr_tb_stop,
13146 .disas_log = arm_tr_disas_log,
13147};
13148
13149static const TranslatorOps thumb_translator_ops = {
13150 .init_disas_context = arm_tr_init_disas_context,
13151 .tb_start = arm_tr_tb_start,
13152 .insn_start = arm_tr_insn_start,
13153 .breakpoint_check = arm_tr_breakpoint_check,
13154 .translate_insn = thumb_tr_translate_insn,
13155 .tb_stop = arm_tr_tb_stop,
13156 .disas_log = arm_tr_disas_log,
13157};
13158
70d3c035 13159/* generate intermediate code for basic block 'tb'. */
23169224 13160void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
70d3c035 13161{
13162 DisasContext dc;
13163 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 13164
13165 if (ARM_TBFLAG_THUMB(tb->flags)) {
13166 ops = &thumb_translator_ops;
13167 }
23169224 13168#ifdef TARGET_AARCH64
70d3c035 13169 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
23169224 13170 ops = &aarch64_translator_ops;
13171 }
13172#endif
13173
13174 translator_loop(ops, &dc.base, cpu, tb);
13175}
13176
13177void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
13178 int flags)
2c0262af 13179{
13180 ARMCPU *cpu = ARM_CPU(cs);
13181 CPUARMState *env = &cpu->env;
13182 int i;
13183
13184 if (is_a64(env)) {
13185 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
13186 return;
13187 }
13188
2c0262af 13189 for (i = 0; i < 16; i++) {
7fe48483 13190 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 13191 if ((i % 4) == 3)
7fe48483 13192 cpu_fprintf(f, "\n");
2c0262af 13193 else
7fe48483 13194 cpu_fprintf(f, " ");
2c0262af 13195 }
06e5cf7a 13196
13197 if (arm_feature(env, ARM_FEATURE_M)) {
13198 uint32_t xpsr = xpsr_read(env);
13199 const char *mode;
13200 const char *ns_status = "";
13201
13202 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
13203 ns_status = env->v7m.secure ? "S " : "NS ";
13204 }
13205
13206 if (xpsr & XPSR_EXCP) {
13207 mode = "handler";
13208 } else {
8bfc26ea 13209 if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
13210 mode = "unpriv-thread";
13211 } else {
13212 mode = "priv-thread";
13213 }
13214 }
13215
1e577cc7 13216 cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
13217 xpsr,
13218 xpsr & XPSR_N ? 'N' : '-',
13219 xpsr & XPSR_Z ? 'Z' : '-',
13220 xpsr & XPSR_C ? 'C' : '-',
13221 xpsr & XPSR_V ? 'V' : '-',
13222 xpsr & XPSR_T ? 'T' : 'A',
1e577cc7 13223 ns_status,
5b906f35 13224 mode);
06e5cf7a 13225 } else {
13226 uint32_t psr = cpsr_read(env);
13227 const char *ns_status = "";
13228
13229 if (arm_feature(env, ARM_FEATURE_EL3) &&
13230 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
13231 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
13232 }
13233
13234 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
13235 psr,
13236 psr & CPSR_N ? 'N' : '-',
13237 psr & CPSR_Z ? 'Z' : '-',
13238 psr & CPSR_C ? 'C' : '-',
13239 psr & CPSR_V ? 'V' : '-',
13240 psr & CPSR_T ? 'T' : 'A',
13241 ns_status,
81e37284 13242 aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
5b906f35 13243 }
b7bcbe95 13244
13245 if (flags & CPU_DUMP_FPU) {
13246 int numvfpregs = 0;
13247 if (arm_feature(env, ARM_FEATURE_VFP)) {
13248 numvfpregs += 16;
13249 }
13250 if (arm_feature(env, ARM_FEATURE_VFP3)) {
13251 numvfpregs += 16;
13252 }
13253 for (i = 0; i < numvfpregs; i++) {
9a2b5256 13254 uint64_t v = *aa32_vfp_dreg(env, i);
13255 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
13256 i * 2, (uint32_t)v,
13257 i * 2 + 1, (uint32_t)(v >> 32),
13258 i, v);
13259 }
13260 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 13261 }
2c0262af 13262}
a6b025d3 13263
13264void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
13265 target_ulong *data)
d2856f1a 13266{
3926cc84 13267 if (is_a64(env)) {
bad729e2 13268 env->pc = data[0];
40f860cd 13269 env->condexec_bits = 0;
aaa1f954 13270 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 13271 } else {
13272 env->regs[15] = data[0];
13273 env->condexec_bits = data[1];
aaa1f954 13274 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 13275 }
d2856f1a 13276}