]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
target/arm: Add v8M stack limit checks on NS function calls
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
36a71934 28#include "tcg-op-gvec.h"
1de7afc9 29#include "qemu/log.h"
534df156 30#include "qemu/bitops.h"
1d854765 31#include "arm_ldst.h"
19a6e31c 32#include "exec/semihost.h"
1497c961 33
2ef6175a
RH
34#include "exec/helper-proto.h"
35#include "exec/helper-gen.h"
2c0262af 36
a7e30d84 37#include "trace-tcg.h"
508127e2 38#include "exec/log.h"
a7e30d84
LV
39
40
2b51668f
PM
41#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 43/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 44#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
c99a55d3 45#define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
2b51668f
PM
46#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 51
86753403 52#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 53
f570c61e 54#include "translate.h"
e12ce78d 55
b5ff1b31
FB
56#if defined(CONFIG_USER_ONLY)
57#define IS_USER(s) 1
58#else
59#define IS_USER(s) (s->user)
60#endif
61
ad69471c 62/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 63static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 64static TCGv_i32 cpu_R[16];
78bcaa3e
RH
65TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66TCGv_i64 cpu_exclusive_addr;
67TCGv_i64 cpu_exclusive_val;
ad69471c 68
b26eefb6 69/* FIXME: These should be removed. */
39d5492a 70static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 71static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 72
022c62cb 73#include "exec/gen-icount.h"
2e70f6ef 74
155c3eac
FN
/* Names used when registering cpu_R[] as TCG globals; index 13 is SP,
 * 14 is LR and 15 is reported as "pc".
 */
static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
78
61adacc8
RH
79/* Function prototypes for gen_ functions calling Neon helpers. */
80typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
81 TCGv_i32, TCGv_i32);
82
b26eefb6
PB
/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    /* Expose the 16 AArch32 core registers as TCG globals backed by
     * CPUARMState.regs[], so generated code accesses them directly.
     */
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    /* The four condition flags live in separate 32-bit globals. */
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    /* State used by load-exclusive/store-exclusive emulation. */
    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    /* Let the AArch64 translator register its own globals too. */
    a64_translate_init();
}
105
9bb6558a
PM
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;      /* syndrome access size */
    bool sse = memop & MO_SIGN;     /* sign-extending load */
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask; /* Rt for the syndrome */

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
148
/* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
 * insns:
 *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
 *  otherwise, access as if at PL0.
 */
static inline int get_a32_user_mem_index(DisasContext *s)
{
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    /* M-profile: each privileged index maps to its user counterpart. */
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        /* Stage-2 (and anything unexpected) cannot occur here. */
        g_assert_not_reached();
    }
}
182
39d5492a 183static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 184{
39d5492a 185 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
186 tcg_gen_ld_i32(tmp, cpu_env, offset);
187 return tmp;
188}
189
0ecb72a5 190#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 191
/* Store var at the given byte offset within CPUARMState and free it
 * (the temporary is consumed).
 */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}
197
198#define store_cpu_field(var, name) \
0ecb72a5 199 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 200
b26eefb6 201/* Set a variable to the value of a CPU register. */
39d5492a 202static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
203{
204 if (reg == 15) {
205 uint32_t addr;
b90372ad 206 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
207 if (s->thumb)
208 addr = (long)s->pc + 2;
209 else
210 addr = (long)s->pc + 4;
211 tcg_gen_movi_i32(var, addr);
212 } else {
155c3eac 213 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
214 }
215}
216
217/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 218static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 219{
39d5492a 220 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
221 load_reg_var(s, tmp, reg);
222 return tmp;
223}
224
/* Set a CPU register. The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        /* A write to PC is a branch, so the TB must end here. */
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
241
55203189
PM
/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        /* The helper takes the new SP value and raises the exception
         * itself when the limit check fails.
         */
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    /* store_reg() consumes (frees) var. */
    store_reg(s, 13, var);
}
258
b26eefb6 259/* Value extensions. */
86831435
PB
260#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
261#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
262#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
263#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
264
1497c961
PB
265#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
266#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 267
b26eefb6 268
39d5492a 269static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 270{
39d5492a 271 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 272 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
273 tcg_temp_free_i32(tmp_mask);
274}
d9ba4830
PB
275/* Set NZCV flags from the high 4 bits of var. */
276#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
277
/* Raise a QEMU-internal exception (EXCP_*) with no architectural
 * syndrome; only valid for excp values that excp_is_internal() accepts.
 */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}
286
/* Raise an architectural exception with the given syndrome value,
 * targeting the given exception level.
 */
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
300
50225ad0
PM
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        /* Clear both our cached copy and the CPU state flag. */
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}
311
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    /* The exception never returns to this TB. */
    s->base.is_jmp = DISAS_NORETURN;
}
328
5425415e
PM
329static void gen_singlestep_exception(DisasContext *s)
330{
331 /* Generate the right kind of exception for singlestep, which is
332 * either the architectural singlestep or EXCP_DEBUG for QEMU's
333 * gdb singlestepping.
334 */
335 if (s->ss_active) {
336 gen_step_complete_exception(s);
337 } else {
338 gen_exception_internal(EXCP_DEBUG);
339 }
340}
341
b636649f
PM
342static inline bool is_singlestepping(DisasContext *s)
343{
344 /* Return true if we are singlestepping either because of
345 * architectural singlestep or QEMU gdbstub singlestep. This does
346 * not include the command line '-singlestep' mode which is rather
347 * misnamed as it only means "one instruction per TB" and doesn't
348 * affect the code we generate.
349 */
dcba3a8d 350 return s->base.singlestep_enabled || s->ss_active;
b636649f
PM
351}
352
/* Dual signed 16x16->32 multiply: on return a holds the product of the
 * sign-extended low halves, b the product of the high halves.
 * Both inputs are modified in place.
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    /* tmp1 = sx16(a) * sx16(b) (low halves) */
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    /* b = (a >> 16) * (b >> 16) (high halves, arithmetic shift) */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
367
368/* Byteswap each halfword. */
39d5492a 369static void gen_rev16(TCGv_i32 var)
3670669c 370{
39d5492a 371 TCGv_i32 tmp = tcg_temp_new_i32();
68cedf73 372 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
3670669c 373 tcg_gen_shri_i32(tmp, var, 8);
68cedf73
AJ
374 tcg_gen_and_i32(tmp, tmp, mask);
375 tcg_gen_and_i32(var, var, mask);
3670669c 376 tcg_gen_shli_i32(var, var, 8);
3670669c 377 tcg_gen_or_i32(var, var, tmp);
68cedf73 378 tcg_temp_free_i32(mask);
7d1b0095 379 tcg_temp_free_i32(tmp);
3670669c
PB
380}
381
/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);   /* isolate the low 16 bits */
    tcg_gen_bswap16_i32(var, var);  /* swap the two bytes */
    tcg_gen_ext16s_i32(var, var);   /* sign-extend to 32 bits */
}
389
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    /* a doubles as the result; it is returned for caller convenience. */
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
403
/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    /* a doubles as the result; note the operand order: (b << 32) - a. */
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
417
5e3f878a 418/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 419static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 420{
39d5492a
PM
421 TCGv_i32 lo = tcg_temp_new_i32();
422 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 423 TCGv_i64 ret;
5e3f878a 424
831d7fe8 425 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 426 tcg_temp_free_i32(a);
7d1b0095 427 tcg_temp_free_i32(b);
831d7fe8
RH
428
429 ret = tcg_temp_new_i64();
430 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
431 tcg_temp_free_i32(lo);
432 tcg_temp_free_i32(hi);
831d7fe8
RH
433
434 return ret;
5e3f878a
PB
435}
436
39d5492a 437static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 438{
39d5492a
PM
439 TCGv_i32 lo = tcg_temp_new_i32();
440 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 441 TCGv_i64 ret;
5e3f878a 442
831d7fe8 443 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 444 tcg_temp_free_i32(a);
7d1b0095 445 tcg_temp_free_i32(b);
831d7fe8
RH
446
447 ret = tcg_temp_new_i64();
448 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
449 tcg_temp_free_i32(lo);
450 tcg_temp_free_i32(hi);
831d7fe8
RH
451
452 return ret;
5e3f878a
PB
453}
454
8f01245e 455/* Swap low and high halfwords. */
39d5492a 456static void gen_swap_half(TCGv_i32 var)
8f01245e 457{
39d5492a 458 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
459 tcg_gen_shri_i32(tmp, var, 16);
460 tcg_gen_shli_i32(var, var, 16);
461 tcg_gen_or_i32(var, var, tmp);
7d1b0095 462 tcg_temp_free_i32(tmp);
8f01245e
PB
463}
464
b26eefb6
PB
/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
   The low halfwords are added without carry into the high halfwords:
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Save the halfword-boundary carry bits, clear them, add, then
     * restore the saved bits with xor so no carry crosses bit 15.
     */
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
484
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}
490
/* Set N and Z flags from var.  NF holds the value itself (sign bit is N);
 * ZF is nonzero iff the value is nonzero.
 */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}
497
/* T0 += T1 + CF.  Flags are not updated. */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}
504
/* dest = T0 + T1 + CF.  Flags are not updated. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}
511
/* dest = T0 - T1 + CF - 1 (subtract with borrow).  Flags not updated. */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
519
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* add2 computes the 33-bit sum: result in NF, carry-out in CF. */
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff the operands have
     * the same sign but the result's sign differs.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
533
/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained add2 ops: first fold in the carry-in, then t1,
         * accumulating the carry-out in cpu_CF each time.
         */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: compute the 33-bit sum in a 64-bit temporary and
         * split it into result (NF) and carry (CF).
         */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V: overflow iff operands share a sign that the result lacks. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
561
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction carry means "no borrow": C = (t0 >= t1) unsigned. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    /* V = (result ^ t0) & (t0 ^ t1): overflow iff operand signs differ
     * and the result's sign differs from t0's.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
576
/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Subtract-with-carry is just add-with-carry of the complement. */
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
585
/* Variable shifts by register: the amount is t1[7:0].  The movcond
 * substitutes a zero source operand when the amount exceeds 0x1f, so
 * shifts by 32..255 correctly produce 0; the TCG shift itself then only
 * ever sees an amount in 0..31.
 */
#define GEN_SHIFT(name)                                       \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
{                                                             \
    TCGv_i32 tmp1, tmp2, tmp3;                                \
    tmp1 = tcg_temp_new_i32();                                \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                         \
    tmp2 = tcg_const_i32(0);                                  \
    tmp3 = tcg_const_i32(0x1f);                               \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
    tcg_temp_free_i32(tmp3);                                  \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                       \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                   \
    tcg_temp_free_i32(tmp2);                                  \
    tcg_temp_free_i32(tmp1);                                  \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
604
/* Variable arithmetic shift right: amount is t1[7:0], clamped to 31 via
 * movcond (an ASR by 32 or more produces the same bit pattern as ASR #31).
 */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    /* tmp1 = min(tmp1, 31) */
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
616
/* dest = |src|: select src when src > 0, otherwise its negation. */
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
ad69471c 626
/* Set CF to bit 'shift' of var, i.e. the last bit shifted out. */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            /* Mask down to a single bit; a shift of 31 already leaves
             * only one bit, so the AND would be redundant.
             */
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
b26eefb6 638
9a119ff6 639/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
640static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
641 int shift, int flags)
9a119ff6
PB
642{
643 switch (shiftop) {
644 case 0: /* LSL */
645 if (shift != 0) {
646 if (flags)
647 shifter_out_im(var, 32 - shift);
648 tcg_gen_shli_i32(var, var, shift);
649 }
650 break;
651 case 1: /* LSR */
652 if (shift == 0) {
653 if (flags) {
66c374de 654 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
655 }
656 tcg_gen_movi_i32(var, 0);
657 } else {
658 if (flags)
659 shifter_out_im(var, shift - 1);
660 tcg_gen_shri_i32(var, var, shift);
661 }
662 break;
663 case 2: /* ASR */
664 if (shift == 0)
665 shift = 32;
666 if (flags)
667 shifter_out_im(var, shift - 1);
668 if (shift == 32)
669 shift = 31;
670 tcg_gen_sari_i32(var, var, shift);
671 break;
672 case 3: /* ROR/RRX */
673 if (shift != 0) {
674 if (flags)
675 shifter_out_im(var, shift - 1);
f669df27 676 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 677 } else {
39d5492a 678 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 679 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
680 if (flags)
681 shifter_out_im(var, 0);
682 tcg_gen_shri_i32(var, var, 1);
b26eefb6 683 tcg_gen_or_i32(var, var, tmp);
7d1b0095 684 tcg_temp_free_i32(tmp);
b26eefb6
PB
685 }
686 }
687};
688
39d5492a
PM
/* Shift by register.  If flags are required, call out to helpers which
 * also compute the shifter carry-out; otherwise emit plain TCG ops.
 * The shift temporary is consumed (freed) in both cases.
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
716
6ddbc6e4
PB
#define PAS_OP(pfx) \
    switch (op2) { \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
/* Emit an Arm-encoding parallel add/subtract: op1 selects the helper
 * prefix (s/u/q/sh/uq/uh), op2 the operation within PAS_OP above.
 * The result is written back into a.
 */
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        /* These helpers take an extra pointer to env->GE. */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
762
6ddbc6e4
PB
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
#define PAS_OP(pfx) \
    switch (op1) { \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
/* Thumb-2 encoding of the parallel add/subtract ops: here op2 selects
 * the helper prefix and op1 the operation — the reverse of the Arm
 * encoding handled by gen_arm_parallel_addsub().
 */
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        /* These helpers take an extra pointer to env->GE. */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
809
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 * On return, cmp holds a TCG condition and value to test; value is a
 * flag global unless a scratch temp had to be computed, in which case
 * value_global is false and arm_free_cc() will free it.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    /* Each pair of codes is a condition and its inverse; the inversion
     * is applied at the end based on the low bit of cc.
     */
    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions. */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0. */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    /* Odd codes are the inverse of the preceding even code. */
    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
899
/* Release any scratch temp that arm_test_cc() allocated; flag globals
 * (value_global == true) must not be freed.
 */
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}
906
/* Branch to label if the prepared comparison (cmp->value vs 0) holds. */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}
911
/* Convenience wrapper: branch to label if condition code cc holds. */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
2c0262af 919
/* Indexed by data-processing opcode: nonzero entries mark the logical
 * operations (and/xor/andl/xorl/orr/mov/bic/mvn) as opposed to the
 * arithmetic ones.
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 938
4d5e8c96
PM
/* If inside an IT block, sync the condexec bits held in the
 * DisasContext back into CPUARMState.condexec_bits.
 */
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
948
/* Store an immediate PC value into cpu_R[15]. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
953
d9ba4830
PB
/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        /* Interworking branch: bit 0 of the target selects the new
         * Thumb state; only store it when it actually changes.
         */
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
968
/* Set PC and Thumb state from var. var is marked as dead.
 * PC gets bits [31:1] of var; the new Thumb state is bit 0.
 */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);   /* consumes var */
}
977
3bb8a96f
PM
978/* Set PC and Thumb state from var. var is marked as dead.
979 * For M-profile CPUs, include logic to detect exception-return
980 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
981 * and BX reg, and no others, and happens only for code in Handler mode.
982 */
983static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
984{
985 /* Generate the same code here as for a simple bx, but flag via
dcba3a8d 986 * s->base.is_jmp that we need to do the rest of the work later.
3bb8a96f
PM
987 */
988 gen_bx(s, var);
d02a8698
PM
989 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
990 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
dcba3a8d 991 s->base.is_jmp = DISAS_BX_EXCRET;
3bb8a96f
PM
992 }
993}
994
/* Generate the code to finish possible exception return and end the TB */
static inline void gen_bx_excret_final_code(DisasContext *s)
{
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
1032
fb602cb7
PM
1033static inline void gen_bxns(DisasContext *s, int rm)
1034{
1035 TCGv_i32 var = load_reg(s, rm);
1036
1037 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1038 * we need to sync state before calling it, but:
1039 * - we don't need to do gen_set_pc_im() because the bxns helper will
1040 * always set the PC itself
1041 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1042 * unless it's outside an IT block or the last insn in an IT block,
1043 * so we know that condexec == 0 (already set at the top of the TB)
1044 * is correct in the non-UNPREDICTABLE cases, and we can choose
1045 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1046 */
1047 gen_helper_v7m_bxns(cpu_env, var);
1048 tcg_temp_free_i32(var);
ef475b5d 1049 s->base.is_jmp = DISAS_EXIT;
fb602cb7
PM
1050}
1051
3e3fa230
PM
1052static inline void gen_blxns(DisasContext *s, int rm)
1053{
1054 TCGv_i32 var = load_reg(s, rm);
1055
1056 /* We don't need to sync condexec state, for the same reason as bxns.
1057 * We do however need to set the PC, because the blxns helper reads it.
1058 * The blxns helper may throw an exception.
1059 */
1060 gen_set_pc_im(s, s->pc);
1061 gen_helper_v7m_blxns(cpu_env, var);
1062 tcg_temp_free_i32(var);
1063 s->base.is_jmp = DISAS_EXIT;
1064}
1065
21aeb343
JR
1066/* Variant of store_reg which uses branch&exchange logic when storing
1067 to r15 in ARM architecture v7 and above. The source must be a temporary
1068 and will be marked as dead. */
7dcc1f89 1069static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
1070{
1071 if (reg == 15 && ENABLE_ARCH_7) {
1072 gen_bx(s, var);
1073 } else {
1074 store_reg(s, reg, var);
1075 }
1076}
1077
be5e7a76
DES
1078/* Variant of store_reg which uses branch&exchange logic when storing
1079 * to r15 in ARM architecture v5T and above. This is used for storing
1080 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1081 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1082static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1083{
1084 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1085 gen_bx_excret(s, var);
be5e7a76
DES
1086 } else {
1087 store_reg(s, reg, var);
1088 }
1089}
1090
e334bd31
PB
1091#ifdef CONFIG_USER_ONLY
1092#define IS_USER_ONLY 1
1093#else
1094#define IS_USER_ONLY 0
1095#endif
1096
08307563
PM
1097/* Abstractions of "generate code to do a guest load/store for
1098 * AArch32", where a vaddr is always 32 bits (and is zero
1099 * extended if we're a 64 bit core) and data is also
1100 * 32 bits unless specifically doing a 64 bit access.
1101 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 1102 * that the address argument is TCGv_i32 rather than TCGv.
08307563 1103 */
08307563 1104
7f5616f5 1105static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
08307563 1106{
7f5616f5
RH
1107 TCGv addr = tcg_temp_new();
1108 tcg_gen_extu_i32_tl(addr, a32);
1109
e334bd31 1110 /* Not needed for user-mode BE32, where we use MO_BE instead. */
7f5616f5
RH
1111 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1112 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
e334bd31 1113 }
7f5616f5 1114 return addr;
08307563
PM
1115}
1116
7f5616f5
RH
1117static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1118 int index, TCGMemOp opc)
08307563 1119{
2aeba0d0
JS
1120 TCGv addr;
1121
1122 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1123 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1124 opc |= MO_ALIGN;
1125 }
1126
1127 addr = gen_aa32_addr(s, a32, opc);
7f5616f5
RH
1128 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1129 tcg_temp_free(addr);
08307563
PM
1130}
1131
7f5616f5
RH
1132static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1133 int index, TCGMemOp opc)
1134{
2aeba0d0
JS
1135 TCGv addr;
1136
1137 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1138 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1139 opc |= MO_ALIGN;
1140 }
1141
1142 addr = gen_aa32_addr(s, a32, opc);
7f5616f5
RH
1143 tcg_gen_qemu_st_i32(val, addr, index, opc);
1144 tcg_temp_free(addr);
1145}
08307563 1146
7f5616f5 1147#define DO_GEN_LD(SUFF, OPC) \
12dcc321 1148static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
7f5616f5 1149 TCGv_i32 a32, int index) \
08307563 1150{ \
7f5616f5 1151 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
9bb6558a
PM
1152} \
1153static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1154 TCGv_i32 val, \
1155 TCGv_i32 a32, int index, \
1156 ISSInfo issinfo) \
1157{ \
1158 gen_aa32_ld##SUFF(s, val, a32, index); \
1159 disas_set_da_iss(s, OPC, issinfo); \
08307563
PM
1160}
1161
7f5616f5 1162#define DO_GEN_ST(SUFF, OPC) \
12dcc321 1163static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
7f5616f5 1164 TCGv_i32 a32, int index) \
08307563 1165{ \
7f5616f5 1166 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
9bb6558a
PM
1167} \
1168static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1169 TCGv_i32 val, \
1170 TCGv_i32 a32, int index, \
1171 ISSInfo issinfo) \
1172{ \
1173 gen_aa32_st##SUFF(s, val, a32, index); \
1174 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
08307563
PM
1175}
1176
7f5616f5 1177static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
08307563 1178{
e334bd31
PB
1179 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1180 if (!IS_USER_ONLY && s->sctlr_b) {
1181 tcg_gen_rotri_i64(val, val, 32);
1182 }
08307563
PM
1183}
1184
7f5616f5
RH
1185static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1186 int index, TCGMemOp opc)
08307563 1187{
7f5616f5
RH
1188 TCGv addr = gen_aa32_addr(s, a32, opc);
1189 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1190 gen_aa32_frob64(s, val);
1191 tcg_temp_free(addr);
1192}
1193
1194static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1195 TCGv_i32 a32, int index)
1196{
1197 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1198}
1199
/* Emit a 64-bit guest store of @val to AArch32 address @a32.
 * For system-mode BE32 the words are swapped into a temporary first
 * so @val itself is not clobbered.
 */
static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

/* 64-bit store with the default endianness for this translation. */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}
08307563 1222
7f5616f5
RH
1223DO_GEN_LD(8s, MO_SB)
1224DO_GEN_LD(8u, MO_UB)
1225DO_GEN_LD(16s, MO_SW)
1226DO_GEN_LD(16u, MO_UW)
1227DO_GEN_LD(32u, MO_UL)
7f5616f5
RH
1228DO_GEN_ST(8, MO_UB)
1229DO_GEN_ST(16, MO_UW)
1230DO_GEN_ST(32, MO_UL)
08307563 1231
37e6456e
PM
1232static inline void gen_hvc(DisasContext *s, int imm16)
1233{
1234 /* The pre HVC helper handles cases when HVC gets trapped
1235 * as an undefined insn by runtime configuration (ie before
1236 * the insn really executes).
1237 */
1238 gen_set_pc_im(s, s->pc - 4);
1239 gen_helper_pre_hvc(cpu_env);
1240 /* Otherwise we will treat this as a real exception which
1241 * happens after execution of the insn. (The distinction matters
1242 * for the PC value reported to the exception handler and also
1243 * for single stepping.)
1244 */
1245 s->svc_imm = imm16;
1246 gen_set_pc_im(s, s->pc);
dcba3a8d 1247 s->base.is_jmp = DISAS_HVC;
37e6456e
PM
1248}
1249
/* Generate code for the SMC (secure monitor call) instruction. */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}
1264
d4a2dc67
PM
1265static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1266{
1267 gen_set_condexec(s);
1268 gen_set_pc_im(s, s->pc - offset);
1269 gen_exception_internal(excp);
dcba3a8d 1270 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1271}
1272
73710361
GB
1273static void gen_exception_insn(DisasContext *s, int offset, int excp,
1274 int syn, uint32_t target_el)
d4a2dc67
PM
1275{
1276 gen_set_condexec(s);
1277 gen_set_pc_im(s, s->pc - offset);
73710361 1278 gen_exception(excp, syn, target_el);
dcba3a8d 1279 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1280}
1281
c900a2e6
PM
1282static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
1283{
1284 TCGv_i32 tcg_syn;
1285
1286 gen_set_condexec(s);
1287 gen_set_pc_im(s, s->pc - offset);
1288 tcg_syn = tcg_const_i32(syn);
1289 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1290 tcg_temp_free_i32(tcg_syn);
1291 s->base.is_jmp = DISAS_NORETURN;
1292}
1293
b5ff1b31
FB
1294/* Force a TB lookup after an instruction that changes the CPU state. */
1295static inline void gen_lookup_tb(DisasContext *s)
1296{
a6445c52 1297 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
dcba3a8d 1298 s->base.is_jmp = DISAS_EXIT;
b5ff1b31
FB
1299}
1300
19a6e31c
PM
1301static inline void gen_hlt(DisasContext *s, int imm)
1302{
1303 /* HLT. This has two purposes.
1304 * Architecturally, it is an external halting debug instruction.
1305 * Since QEMU doesn't implement external debug, we treat this as
1306 * it is required for halting debug disabled: it will UNDEF.
1307 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1308 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1309 * must trigger semihosting even for ARMv7 and earlier, where
1310 * HLT was an undefined encoding.
1311 * In system mode, we don't allow userspace access to
1312 * semihosting, to provide some semblance of security
1313 * (and for consistency with our 32-bit semihosting).
1314 */
1315 if (semihosting_enabled() &&
1316#ifndef CONFIG_USER_ONLY
1317 s->current_el != 0 &&
1318#endif
1319 (imm == (s->thumb ? 0x3c : 0xf000))) {
1320 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1321 return;
1322 }
1323
1324 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1325 default_exception_el(s));
1326}
1327
b0109805 1328static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1329 TCGv_i32 var)
2c0262af 1330{
1e8d4eec 1331 int val, rm, shift, shiftop;
39d5492a 1332 TCGv_i32 offset;
2c0262af
FB
1333
1334 if (!(insn & (1 << 25))) {
1335 /* immediate */
1336 val = insn & 0xfff;
1337 if (!(insn & (1 << 23)))
1338 val = -val;
537730b9 1339 if (val != 0)
b0109805 1340 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1341 } else {
1342 /* shift/register */
1343 rm = (insn) & 0xf;
1344 shift = (insn >> 7) & 0x1f;
1e8d4eec 1345 shiftop = (insn >> 5) & 3;
b26eefb6 1346 offset = load_reg(s, rm);
9a119ff6 1347 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1348 if (!(insn & (1 << 23)))
b0109805 1349 tcg_gen_sub_i32(var, var, offset);
2c0262af 1350 else
b0109805 1351 tcg_gen_add_i32(var, var, offset);
7d1b0095 1352 tcg_temp_free_i32(offset);
2c0262af
FB
1353 }
1354}
1355
191f9a93 1356static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1357 int extra, TCGv_i32 var)
2c0262af
FB
1358{
1359 int val, rm;
39d5492a 1360 TCGv_i32 offset;
3b46e624 1361
2c0262af
FB
1362 if (insn & (1 << 22)) {
1363 /* immediate */
1364 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1365 if (!(insn & (1 << 23)))
1366 val = -val;
18acad92 1367 val += extra;
537730b9 1368 if (val != 0)
b0109805 1369 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1370 } else {
1371 /* register */
191f9a93 1372 if (extra)
b0109805 1373 tcg_gen_addi_i32(var, var, extra);
2c0262af 1374 rm = (insn) & 0xf;
b26eefb6 1375 offset = load_reg(s, rm);
2c0262af 1376 if (!(insn & (1 << 23)))
b0109805 1377 tcg_gen_sub_i32(var, var, offset);
2c0262af 1378 else
b0109805 1379 tcg_gen_add_i32(var, var, offset);
7d1b0095 1380 tcg_temp_free_i32(offset);
2c0262af
FB
1381 }
1382}
1383
5aaebd13
PM
1384static TCGv_ptr get_fpstatus_ptr(int neon)
1385{
1386 TCGv_ptr statusptr = tcg_temp_new_ptr();
1387 int offset;
1388 if (neon) {
0ecb72a5 1389 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1390 } else {
0ecb72a5 1391 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1392 }
1393 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1394 return statusptr;
1395}
1396
4373f3ce
PB
1397#define VFP_OP2(name) \
1398static inline void gen_vfp_##name(int dp) \
1399{ \
ae1857ec
PM
1400 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1401 if (dp) { \
1402 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1403 } else { \
1404 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1405 } \
1406 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
1407}
1408
4373f3ce
PB
1409VFP_OP2(add)
1410VFP_OP2(sub)
1411VFP_OP2(mul)
1412VFP_OP2(div)
1413
1414#undef VFP_OP2
1415
605a6aed
PM
1416static inline void gen_vfp_F1_mul(int dp)
1417{
1418 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1419 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1420 if (dp) {
ae1857ec 1421 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1422 } else {
ae1857ec 1423 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1424 }
ae1857ec 1425 tcg_temp_free_ptr(fpst);
605a6aed
PM
1426}
1427
static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
4373f3ce
PB
1438static inline void gen_vfp_abs(int dp)
1439{
1440 if (dp)
1441 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1442 else
1443 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1444}
1445
1446static inline void gen_vfp_neg(int dp)
1447{
1448 if (dp)
1449 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1450 else
1451 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1452}
1453
1454static inline void gen_vfp_sqrt(int dp)
1455{
1456 if (dp)
1457 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1458 else
1459 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1460}
1461
1462static inline void gen_vfp_cmp(int dp)
1463{
1464 if (dp)
1465 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1466 else
1467 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1468}
1469
1470static inline void gen_vfp_cmpe(int dp)
1471{
1472 if (dp)
1473 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1474 else
1475 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1476}
1477
1478static inline void gen_vfp_F1_ld0(int dp)
1479{
1480 if (dp)
5b340b51 1481 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1482 else
5b340b51 1483 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1484}
1485
5500b06c
PM
1486#define VFP_GEN_ITOF(name) \
1487static inline void gen_vfp_##name(int dp, int neon) \
1488{ \
5aaebd13 1489 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1490 if (dp) { \
1491 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1492 } else { \
1493 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1494 } \
b7fa9214 1495 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1496}
1497
5500b06c
PM
1498VFP_GEN_ITOF(uito)
1499VFP_GEN_ITOF(sito)
1500#undef VFP_GEN_ITOF
4373f3ce 1501
5500b06c
PM
1502#define VFP_GEN_FTOI(name) \
1503static inline void gen_vfp_##name(int dp, int neon) \
1504{ \
5aaebd13 1505 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1506 if (dp) { \
1507 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1508 } else { \
1509 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1510 } \
b7fa9214 1511 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1512}
1513
5500b06c
PM
1514VFP_GEN_FTOI(toui)
1515VFP_GEN_FTOI(touiz)
1516VFP_GEN_FTOI(tosi)
1517VFP_GEN_FTOI(tosiz)
1518#undef VFP_GEN_FTOI
4373f3ce 1519
16d5b3ca 1520#define VFP_GEN_FIX(name, round) \
5500b06c 1521static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1522{ \
39d5492a 1523 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
5aaebd13 1524 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c 1525 if (dp) { \
16d5b3ca
WN
1526 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1527 statusptr); \
5500b06c 1528 } else { \
16d5b3ca
WN
1529 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1530 statusptr); \
5500b06c 1531 } \
b75263d6 1532 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1533 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1534}
16d5b3ca
WN
1535VFP_GEN_FIX(tosh, _round_to_zero)
1536VFP_GEN_FIX(tosl, _round_to_zero)
1537VFP_GEN_FIX(touh, _round_to_zero)
1538VFP_GEN_FIX(toul, _round_to_zero)
1539VFP_GEN_FIX(shto, )
1540VFP_GEN_FIX(slto, )
1541VFP_GEN_FIX(uhto, )
1542VFP_GEN_FIX(ulto, )
4373f3ce 1543#undef VFP_GEN_FIX
9ee6e8bb 1544
39d5492a 1545static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1546{
08307563 1547 if (dp) {
12dcc321 1548 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1549 } else {
12dcc321 1550 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
08307563 1551 }
b5ff1b31
FB
1552}
1553
39d5492a 1554static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1555{
08307563 1556 if (dp) {
12dcc321 1557 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1558 } else {
12dcc321 1559 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
08307563 1560 }
b5ff1b31
FB
1561}
1562
c39c2b90 1563static inline long vfp_reg_offset(bool dp, unsigned reg)
8e96005d 1564{
9a2b5256 1565 if (dp) {
c39c2b90 1566 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
8e96005d 1567 } else {
c39c2b90 1568 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
9a2b5256
RH
1569 if (reg & 1) {
1570 ofs += offsetof(CPU_DoubleU, l.upper);
1571 } else {
1572 ofs += offsetof(CPU_DoubleU, l.lower);
1573 }
1574 return ofs;
8e96005d
FB
1575 }
1576}
9ee6e8bb
PB
1577
1578/* Return the offset of a 32-bit piece of a NEON register.
1579 zero is the least significant end of the register. */
1580static inline long
1581neon_reg_offset (int reg, int n)
1582{
1583 int sreg;
1584 sreg = reg * 2 + n;
1585 return vfp_reg_offset(0, sreg);
1586}
1587
39d5492a 1588static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1589{
39d5492a 1590 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1591 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1592 return tmp;
1593}
1594
39d5492a 1595static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1596{
1597 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1598 tcg_temp_free_i32(var);
8f8e3aa4
PB
1599}
1600
a7812ae4 1601static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1602{
1603 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1604}
1605
a7812ae4 1606static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1607{
1608 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1609}
1610
1a66ac61
RH
1611static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1612{
1613 TCGv_ptr ret = tcg_temp_new_ptr();
1614 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1615 return ret;
1616}
1617
4373f3ce
PB
1618#define tcg_gen_ld_f32 tcg_gen_ld_i32
1619#define tcg_gen_ld_f64 tcg_gen_ld_i64
1620#define tcg_gen_st_f32 tcg_gen_st_i32
1621#define tcg_gen_st_f64 tcg_gen_st_i64
1622
b7bcbe95
FB
1623static inline void gen_mov_F0_vreg(int dp, int reg)
1624{
1625 if (dp)
4373f3ce 1626 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1627 else
4373f3ce 1628 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1629}
1630
1631static inline void gen_mov_F1_vreg(int dp, int reg)
1632{
1633 if (dp)
4373f3ce 1634 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1635 else
4373f3ce 1636 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1637}
1638
1639static inline void gen_mov_vreg_F0(int dp, int reg)
1640{
1641 if (dp)
4373f3ce 1642 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1643 else
4373f3ce 1644 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1645}
1646
d00584b7 1647#define ARM_CP_RW_BIT (1 << 20)
18c9b560 1648
a7812ae4 1649static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1650{
0ecb72a5 1651 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1652}
1653
a7812ae4 1654static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1655{
0ecb72a5 1656 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1657}
1658
39d5492a 1659static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1660{
39d5492a 1661 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1662 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1663 return var;
e677137d
PB
1664}
1665
39d5492a 1666static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1667{
0ecb72a5 1668 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1669 tcg_temp_free_i32(var);
e677137d
PB
1670}
1671
/* iwMMXt operations on the M0 accumulator temp: moves to/from wRn,
 * bitwise ops with wRn in cpu_V1, and macro-generated arithmetic
 * helpers (the _ENV variants pass cpu_env so the helper can update
 * wCASF flags; _SIZE expands for b/w/l element sizes).
 */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d
PB
1780static void gen_op_iwmmxt_set_mup(void)
1781{
39d5492a 1782 TCGv_i32 tmp;
e677137d
PB
1783 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1784 tcg_gen_ori_i32(tmp, tmp, 2);
1785 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1786}
1787
1788static void gen_op_iwmmxt_set_cup(void)
1789{
39d5492a 1790 TCGv_i32 tmp;
e677137d
PB
1791 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1792 tcg_gen_ori_i32(tmp, tmp, 1);
1793 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1794}
1795
1796static void gen_op_iwmmxt_setpsr_nz(void)
1797{
39d5492a 1798 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1799 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1800 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1801}
1802
1803static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1804{
1805 iwmmxt_load_reg(cpu_V1, rn);
86831435 1806 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1807 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1808}
1809
39d5492a
PM
1810static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1811 TCGv_i32 dest)
18c9b560
AZ
1812{
1813 int rd;
1814 uint32_t offset;
39d5492a 1815 TCGv_i32 tmp;
18c9b560
AZ
1816
1817 rd = (insn >> 16) & 0xf;
da6b5335 1818 tmp = load_reg(s, rd);
18c9b560
AZ
1819
1820 offset = (insn & 0xff) << ((insn >> 7) & 2);
1821 if (insn & (1 << 24)) {
1822 /* Pre indexed */
1823 if (insn & (1 << 23))
da6b5335 1824 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1825 else
da6b5335
FN
1826 tcg_gen_addi_i32(tmp, tmp, -offset);
1827 tcg_gen_mov_i32(dest, tmp);
18c9b560 1828 if (insn & (1 << 21))
da6b5335
FN
1829 store_reg(s, rd, tmp);
1830 else
7d1b0095 1831 tcg_temp_free_i32(tmp);
18c9b560
AZ
1832 } else if (insn & (1 << 21)) {
1833 /* Post indexed */
da6b5335 1834 tcg_gen_mov_i32(dest, tmp);
18c9b560 1835 if (insn & (1 << 23))
da6b5335 1836 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1837 else
da6b5335
FN
1838 tcg_gen_addi_i32(tmp, tmp, -offset);
1839 store_reg(s, rd, tmp);
18c9b560
AZ
1840 } else if (!(insn & (1 << 23)))
1841 return 1;
1842 return 0;
1843}
1844
39d5492a 1845static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1846{
1847 int rd = (insn >> 0) & 0xf;
39d5492a 1848 TCGv_i32 tmp;
18c9b560 1849
da6b5335
FN
1850 if (insn & (1 << 8)) {
1851 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1852 return 1;
da6b5335
FN
1853 } else {
1854 tmp = iwmmxt_load_creg(rd);
1855 }
1856 } else {
7d1b0095 1857 tmp = tcg_temp_new_i32();
da6b5335 1858 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1859 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1860 }
1861 tcg_gen_andi_i32(tmp, tmp, mask);
1862 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1863 tcg_temp_free_i32(tmp);
18c9b560
AZ
1864 return 0;
1865}
1866
a1c7273b 1867/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1868 (ie. an undefined instruction). */
7dcc1f89 1869static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1870{
1871 int rd, wrd;
1872 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1873 TCGv_i32 addr;
1874 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1875
1876 if ((insn & 0x0e000e00) == 0x0c000000) {
1877 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1878 wrd = insn & 0xf;
1879 rdlo = (insn >> 12) & 0xf;
1880 rdhi = (insn >> 16) & 0xf;
d00584b7 1881 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1882 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1883 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1884 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1885 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
d00584b7 1886 } else { /* TMCRR */
da6b5335
FN
1887 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1888 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1889 gen_op_iwmmxt_set_mup();
1890 }
1891 return 0;
1892 }
1893
1894 wrd = (insn >> 12) & 0xf;
7d1b0095 1895 addr = tcg_temp_new_i32();
da6b5335 1896 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1897 tcg_temp_free_i32(addr);
18c9b560 1898 return 1;
da6b5335 1899 }
18c9b560 1900 if (insn & ARM_CP_RW_BIT) {
d00584b7 1901 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1902 tmp = tcg_temp_new_i32();
12dcc321 1903 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1904 iwmmxt_store_creg(wrd, tmp);
18c9b560 1905 } else {
e677137d
PB
1906 i = 1;
1907 if (insn & (1 << 8)) {
d00584b7 1908 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1909 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1910 i = 0;
d00584b7 1911 } else { /* WLDRW wRd */
29531141 1912 tmp = tcg_temp_new_i32();
12dcc321 1913 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1914 }
1915 } else {
29531141 1916 tmp = tcg_temp_new_i32();
d00584b7 1917 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1918 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
d00584b7 1919 } else { /* WLDRB */
12dcc321 1920 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1921 }
1922 }
1923 if (i) {
1924 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1925 tcg_temp_free_i32(tmp);
e677137d 1926 }
18c9b560
AZ
1927 gen_op_iwmmxt_movq_wRn_M0(wrd);
1928 }
1929 } else {
d00584b7 1930 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1931 tmp = iwmmxt_load_creg(wrd);
12dcc321 1932 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1933 } else {
1934 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1935 tmp = tcg_temp_new_i32();
e677137d 1936 if (insn & (1 << 8)) {
d00584b7 1937 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1938 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
d00584b7 1939 } else { /* WSTRW wRd */
ecc7b3aa 1940 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1941 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1942 }
1943 } else {
d00584b7 1944 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1945 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1946 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
d00584b7 1947 } else { /* WSTRB */
ecc7b3aa 1948 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1949 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1950 }
1951 }
18c9b560 1952 }
29531141 1953 tcg_temp_free_i32(tmp);
18c9b560 1954 }
7d1b0095 1955 tcg_temp_free_i32(addr);
18c9b560
AZ
1956 return 0;
1957 }
1958
1959 if ((insn & 0x0f000000) != 0x0e000000)
1960 return 1;
1961
1962 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
d00584b7 1963 case 0x000: /* WOR */
18c9b560
AZ
1964 wrd = (insn >> 12) & 0xf;
1965 rd0 = (insn >> 0) & 0xf;
1966 rd1 = (insn >> 16) & 0xf;
1967 gen_op_iwmmxt_movq_M0_wRn(rd0);
1968 gen_op_iwmmxt_orq_M0_wRn(rd1);
1969 gen_op_iwmmxt_setpsr_nz();
1970 gen_op_iwmmxt_movq_wRn_M0(wrd);
1971 gen_op_iwmmxt_set_mup();
1972 gen_op_iwmmxt_set_cup();
1973 break;
d00584b7 1974 case 0x011: /* TMCR */
18c9b560
AZ
1975 if (insn & 0xf)
1976 return 1;
1977 rd = (insn >> 12) & 0xf;
1978 wrd = (insn >> 16) & 0xf;
1979 switch (wrd) {
1980 case ARM_IWMMXT_wCID:
1981 case ARM_IWMMXT_wCASF:
1982 break;
1983 case ARM_IWMMXT_wCon:
1984 gen_op_iwmmxt_set_cup();
1985 /* Fall through. */
1986 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1987 tmp = iwmmxt_load_creg(wrd);
1988 tmp2 = load_reg(s, rd);
f669df27 1989 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1990 tcg_temp_free_i32(tmp2);
da6b5335 1991 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1992 break;
1993 case ARM_IWMMXT_wCGR0:
1994 case ARM_IWMMXT_wCGR1:
1995 case ARM_IWMMXT_wCGR2:
1996 case ARM_IWMMXT_wCGR3:
1997 gen_op_iwmmxt_set_cup();
da6b5335
FN
1998 tmp = load_reg(s, rd);
1999 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
2000 break;
2001 default:
2002 return 1;
2003 }
2004 break;
d00584b7 2005 case 0x100: /* WXOR */
18c9b560
AZ
2006 wrd = (insn >> 12) & 0xf;
2007 rd0 = (insn >> 0) & 0xf;
2008 rd1 = (insn >> 16) & 0xf;
2009 gen_op_iwmmxt_movq_M0_wRn(rd0);
2010 gen_op_iwmmxt_xorq_M0_wRn(rd1);
2011 gen_op_iwmmxt_setpsr_nz();
2012 gen_op_iwmmxt_movq_wRn_M0(wrd);
2013 gen_op_iwmmxt_set_mup();
2014 gen_op_iwmmxt_set_cup();
2015 break;
d00584b7 2016 case 0x111: /* TMRC */
18c9b560
AZ
2017 if (insn & 0xf)
2018 return 1;
2019 rd = (insn >> 12) & 0xf;
2020 wrd = (insn >> 16) & 0xf;
da6b5335
FN
2021 tmp = iwmmxt_load_creg(wrd);
2022 store_reg(s, rd, tmp);
18c9b560 2023 break;
d00584b7 2024 case 0x300: /* WANDN */
18c9b560
AZ
2025 wrd = (insn >> 12) & 0xf;
2026 rd0 = (insn >> 0) & 0xf;
2027 rd1 = (insn >> 16) & 0xf;
2028 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 2029 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
2030 gen_op_iwmmxt_andq_M0_wRn(rd1);
2031 gen_op_iwmmxt_setpsr_nz();
2032 gen_op_iwmmxt_movq_wRn_M0(wrd);
2033 gen_op_iwmmxt_set_mup();
2034 gen_op_iwmmxt_set_cup();
2035 break;
d00584b7 2036 case 0x200: /* WAND */
18c9b560
AZ
2037 wrd = (insn >> 12) & 0xf;
2038 rd0 = (insn >> 0) & 0xf;
2039 rd1 = (insn >> 16) & 0xf;
2040 gen_op_iwmmxt_movq_M0_wRn(rd0);
2041 gen_op_iwmmxt_andq_M0_wRn(rd1);
2042 gen_op_iwmmxt_setpsr_nz();
2043 gen_op_iwmmxt_movq_wRn_M0(wrd);
2044 gen_op_iwmmxt_set_mup();
2045 gen_op_iwmmxt_set_cup();
2046 break;
d00584b7 2047 case 0x810: case 0xa10: /* WMADD */
18c9b560
AZ
2048 wrd = (insn >> 12) & 0xf;
2049 rd0 = (insn >> 0) & 0xf;
2050 rd1 = (insn >> 16) & 0xf;
2051 gen_op_iwmmxt_movq_M0_wRn(rd0);
2052 if (insn & (1 << 21))
2053 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2054 else
2055 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2056 gen_op_iwmmxt_movq_wRn_M0(wrd);
2057 gen_op_iwmmxt_set_mup();
2058 break;
d00584b7 2059 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
18c9b560
AZ
2060 wrd = (insn >> 12) & 0xf;
2061 rd0 = (insn >> 16) & 0xf;
2062 rd1 = (insn >> 0) & 0xf;
2063 gen_op_iwmmxt_movq_M0_wRn(rd0);
2064 switch ((insn >> 22) & 3) {
2065 case 0:
2066 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2067 break;
2068 case 1:
2069 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2070 break;
2071 case 2:
2072 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2073 break;
2074 case 3:
2075 return 1;
2076 }
2077 gen_op_iwmmxt_movq_wRn_M0(wrd);
2078 gen_op_iwmmxt_set_mup();
2079 gen_op_iwmmxt_set_cup();
2080 break;
d00584b7 2081 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
18c9b560
AZ
2082 wrd = (insn >> 12) & 0xf;
2083 rd0 = (insn >> 16) & 0xf;
2084 rd1 = (insn >> 0) & 0xf;
2085 gen_op_iwmmxt_movq_M0_wRn(rd0);
2086 switch ((insn >> 22) & 3) {
2087 case 0:
2088 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2089 break;
2090 case 1:
2091 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2092 break;
2093 case 2:
2094 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2095 break;
2096 case 3:
2097 return 1;
2098 }
2099 gen_op_iwmmxt_movq_wRn_M0(wrd);
2100 gen_op_iwmmxt_set_mup();
2101 gen_op_iwmmxt_set_cup();
2102 break;
d00584b7 2103 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
18c9b560
AZ
2104 wrd = (insn >> 12) & 0xf;
2105 rd0 = (insn >> 16) & 0xf;
2106 rd1 = (insn >> 0) & 0xf;
2107 gen_op_iwmmxt_movq_M0_wRn(rd0);
2108 if (insn & (1 << 22))
2109 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2110 else
2111 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2112 if (!(insn & (1 << 20)))
2113 gen_op_iwmmxt_addl_M0_wRn(wrd);
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 break;
d00584b7 2117 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
18c9b560
AZ
2118 wrd = (insn >> 12) & 0xf;
2119 rd0 = (insn >> 16) & 0xf;
2120 rd1 = (insn >> 0) & 0xf;
2121 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2122 if (insn & (1 << 21)) {
2123 if (insn & (1 << 20))
2124 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2125 else
2126 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2127 } else {
2128 if (insn & (1 << 20))
2129 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2130 else
2131 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2132 }
18c9b560
AZ
2133 gen_op_iwmmxt_movq_wRn_M0(wrd);
2134 gen_op_iwmmxt_set_mup();
2135 break;
d00584b7 2136 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
18c9b560
AZ
2137 wrd = (insn >> 12) & 0xf;
2138 rd0 = (insn >> 16) & 0xf;
2139 rd1 = (insn >> 0) & 0xf;
2140 gen_op_iwmmxt_movq_M0_wRn(rd0);
2141 if (insn & (1 << 21))
2142 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2143 else
2144 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2145 if (!(insn & (1 << 20))) {
e677137d
PB
2146 iwmmxt_load_reg(cpu_V1, wrd);
2147 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2148 }
2149 gen_op_iwmmxt_movq_wRn_M0(wrd);
2150 gen_op_iwmmxt_set_mup();
2151 break;
d00584b7 2152 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
18c9b560
AZ
2153 wrd = (insn >> 12) & 0xf;
2154 rd0 = (insn >> 16) & 0xf;
2155 rd1 = (insn >> 0) & 0xf;
2156 gen_op_iwmmxt_movq_M0_wRn(rd0);
2157 switch ((insn >> 22) & 3) {
2158 case 0:
2159 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2160 break;
2161 case 1:
2162 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2163 break;
2164 case 2:
2165 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2166 break;
2167 case 3:
2168 return 1;
2169 }
2170 gen_op_iwmmxt_movq_wRn_M0(wrd);
2171 gen_op_iwmmxt_set_mup();
2172 gen_op_iwmmxt_set_cup();
2173 break;
d00584b7 2174 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
18c9b560
AZ
2175 wrd = (insn >> 12) & 0xf;
2176 rd0 = (insn >> 16) & 0xf;
2177 rd1 = (insn >> 0) & 0xf;
2178 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2179 if (insn & (1 << 22)) {
2180 if (insn & (1 << 20))
2181 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2182 else
2183 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2184 } else {
2185 if (insn & (1 << 20))
2186 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2187 else
2188 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2189 }
18c9b560
AZ
2190 gen_op_iwmmxt_movq_wRn_M0(wrd);
2191 gen_op_iwmmxt_set_mup();
2192 gen_op_iwmmxt_set_cup();
2193 break;
d00584b7 2194 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
18c9b560
AZ
2195 wrd = (insn >> 12) & 0xf;
2196 rd0 = (insn >> 16) & 0xf;
2197 rd1 = (insn >> 0) & 0xf;
2198 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2199 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2200 tcg_gen_andi_i32(tmp, tmp, 7);
2201 iwmmxt_load_reg(cpu_V1, rd1);
2202 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2203 tcg_temp_free_i32(tmp);
18c9b560
AZ
2204 gen_op_iwmmxt_movq_wRn_M0(wrd);
2205 gen_op_iwmmxt_set_mup();
2206 break;
d00584b7 2207 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2208 if (((insn >> 6) & 3) == 3)
2209 return 1;
18c9b560
AZ
2210 rd = (insn >> 12) & 0xf;
2211 wrd = (insn >> 16) & 0xf;
da6b5335 2212 tmp = load_reg(s, rd);
18c9b560
AZ
2213 gen_op_iwmmxt_movq_M0_wRn(wrd);
2214 switch ((insn >> 6) & 3) {
2215 case 0:
da6b5335
FN
2216 tmp2 = tcg_const_i32(0xff);
2217 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2218 break;
2219 case 1:
da6b5335
FN
2220 tmp2 = tcg_const_i32(0xffff);
2221 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2222 break;
2223 case 2:
da6b5335
FN
2224 tmp2 = tcg_const_i32(0xffffffff);
2225 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2226 break;
da6b5335 2227 default:
f764718d
RH
2228 tmp2 = NULL;
2229 tmp3 = NULL;
18c9b560 2230 }
da6b5335 2231 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2232 tcg_temp_free_i32(tmp3);
2233 tcg_temp_free_i32(tmp2);
7d1b0095 2234 tcg_temp_free_i32(tmp);
18c9b560
AZ
2235 gen_op_iwmmxt_movq_wRn_M0(wrd);
2236 gen_op_iwmmxt_set_mup();
2237 break;
d00584b7 2238 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
18c9b560
AZ
2239 rd = (insn >> 12) & 0xf;
2240 wrd = (insn >> 16) & 0xf;
da6b5335 2241 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2242 return 1;
2243 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2244 tmp = tcg_temp_new_i32();
18c9b560
AZ
2245 switch ((insn >> 22) & 3) {
2246 case 0:
da6b5335 2247 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2248 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2249 if (insn & 8) {
2250 tcg_gen_ext8s_i32(tmp, tmp);
2251 } else {
2252 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2253 }
2254 break;
2255 case 1:
da6b5335 2256 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2257 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2258 if (insn & 8) {
2259 tcg_gen_ext16s_i32(tmp, tmp);
2260 } else {
2261 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2262 }
2263 break;
2264 case 2:
da6b5335 2265 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2266 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2267 break;
18c9b560 2268 }
da6b5335 2269 store_reg(s, rd, tmp);
18c9b560 2270 break;
d00584b7 2271 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2272 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2273 return 1;
da6b5335 2274 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2275 switch ((insn >> 22) & 3) {
2276 case 0:
da6b5335 2277 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2278 break;
2279 case 1:
da6b5335 2280 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2281 break;
2282 case 2:
da6b5335 2283 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2284 break;
18c9b560 2285 }
da6b5335
FN
2286 tcg_gen_shli_i32(tmp, tmp, 28);
2287 gen_set_nzcv(tmp);
7d1b0095 2288 tcg_temp_free_i32(tmp);
18c9b560 2289 break;
d00584b7 2290 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2291 if (((insn >> 6) & 3) == 3)
2292 return 1;
18c9b560
AZ
2293 rd = (insn >> 12) & 0xf;
2294 wrd = (insn >> 16) & 0xf;
da6b5335 2295 tmp = load_reg(s, rd);
18c9b560
AZ
2296 switch ((insn >> 6) & 3) {
2297 case 0:
da6b5335 2298 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2299 break;
2300 case 1:
da6b5335 2301 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2302 break;
2303 case 2:
da6b5335 2304 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2305 break;
18c9b560 2306 }
7d1b0095 2307 tcg_temp_free_i32(tmp);
18c9b560
AZ
2308 gen_op_iwmmxt_movq_wRn_M0(wrd);
2309 gen_op_iwmmxt_set_mup();
2310 break;
d00584b7 2311 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2312 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2313 return 1;
da6b5335 2314 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2315 tmp2 = tcg_temp_new_i32();
da6b5335 2316 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2317 switch ((insn >> 22) & 3) {
2318 case 0:
2319 for (i = 0; i < 7; i ++) {
da6b5335
FN
2320 tcg_gen_shli_i32(tmp2, tmp2, 4);
2321 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2322 }
2323 break;
2324 case 1:
2325 for (i = 0; i < 3; i ++) {
da6b5335
FN
2326 tcg_gen_shli_i32(tmp2, tmp2, 8);
2327 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2328 }
2329 break;
2330 case 2:
da6b5335
FN
2331 tcg_gen_shli_i32(tmp2, tmp2, 16);
2332 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2333 break;
18c9b560 2334 }
da6b5335 2335 gen_set_nzcv(tmp);
7d1b0095
PM
2336 tcg_temp_free_i32(tmp2);
2337 tcg_temp_free_i32(tmp);
18c9b560 2338 break;
d00584b7 2339 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
18c9b560
AZ
2340 wrd = (insn >> 12) & 0xf;
2341 rd0 = (insn >> 16) & 0xf;
2342 gen_op_iwmmxt_movq_M0_wRn(rd0);
2343 switch ((insn >> 22) & 3) {
2344 case 0:
e677137d 2345 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2346 break;
2347 case 1:
e677137d 2348 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2349 break;
2350 case 2:
e677137d 2351 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2352 break;
2353 case 3:
2354 return 1;
2355 }
2356 gen_op_iwmmxt_movq_wRn_M0(wrd);
2357 gen_op_iwmmxt_set_mup();
2358 break;
d00584b7 2359 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2360 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2361 return 1;
da6b5335 2362 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2363 tmp2 = tcg_temp_new_i32();
da6b5335 2364 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2365 switch ((insn >> 22) & 3) {
2366 case 0:
2367 for (i = 0; i < 7; i ++) {
da6b5335
FN
2368 tcg_gen_shli_i32(tmp2, tmp2, 4);
2369 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2370 }
2371 break;
2372 case 1:
2373 for (i = 0; i < 3; i ++) {
da6b5335
FN
2374 tcg_gen_shli_i32(tmp2, tmp2, 8);
2375 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2376 }
2377 break;
2378 case 2:
da6b5335
FN
2379 tcg_gen_shli_i32(tmp2, tmp2, 16);
2380 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2381 break;
18c9b560 2382 }
da6b5335 2383 gen_set_nzcv(tmp);
7d1b0095
PM
2384 tcg_temp_free_i32(tmp2);
2385 tcg_temp_free_i32(tmp);
18c9b560 2386 break;
d00584b7 2387 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
18c9b560
AZ
2388 rd = (insn >> 12) & 0xf;
2389 rd0 = (insn >> 16) & 0xf;
da6b5335 2390 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2391 return 1;
2392 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2393 tmp = tcg_temp_new_i32();
18c9b560
AZ
2394 switch ((insn >> 22) & 3) {
2395 case 0:
da6b5335 2396 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2397 break;
2398 case 1:
da6b5335 2399 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2400 break;
2401 case 2:
da6b5335 2402 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2403 break;
18c9b560 2404 }
da6b5335 2405 store_reg(s, rd, tmp);
18c9b560 2406 break;
d00584b7 2407 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
18c9b560
AZ
2408 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2409 wrd = (insn >> 12) & 0xf;
2410 rd0 = (insn >> 16) & 0xf;
2411 rd1 = (insn >> 0) & 0xf;
2412 gen_op_iwmmxt_movq_M0_wRn(rd0);
2413 switch ((insn >> 22) & 3) {
2414 case 0:
2415 if (insn & (1 << 21))
2416 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2417 else
2418 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2419 break;
2420 case 1:
2421 if (insn & (1 << 21))
2422 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2423 else
2424 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2425 break;
2426 case 2:
2427 if (insn & (1 << 21))
2428 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2429 else
2430 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2431 break;
2432 case 3:
2433 return 1;
2434 }
2435 gen_op_iwmmxt_movq_wRn_M0(wrd);
2436 gen_op_iwmmxt_set_mup();
2437 gen_op_iwmmxt_set_cup();
2438 break;
d00584b7 2439 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
18c9b560
AZ
2440 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2441 wrd = (insn >> 12) & 0xf;
2442 rd0 = (insn >> 16) & 0xf;
2443 gen_op_iwmmxt_movq_M0_wRn(rd0);
2444 switch ((insn >> 22) & 3) {
2445 case 0:
2446 if (insn & (1 << 21))
2447 gen_op_iwmmxt_unpacklsb_M0();
2448 else
2449 gen_op_iwmmxt_unpacklub_M0();
2450 break;
2451 case 1:
2452 if (insn & (1 << 21))
2453 gen_op_iwmmxt_unpacklsw_M0();
2454 else
2455 gen_op_iwmmxt_unpackluw_M0();
2456 break;
2457 case 2:
2458 if (insn & (1 << 21))
2459 gen_op_iwmmxt_unpacklsl_M0();
2460 else
2461 gen_op_iwmmxt_unpacklul_M0();
2462 break;
2463 case 3:
2464 return 1;
2465 }
2466 gen_op_iwmmxt_movq_wRn_M0(wrd);
2467 gen_op_iwmmxt_set_mup();
2468 gen_op_iwmmxt_set_cup();
2469 break;
d00584b7 2470 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
18c9b560
AZ
2471 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2472 wrd = (insn >> 12) & 0xf;
2473 rd0 = (insn >> 16) & 0xf;
2474 gen_op_iwmmxt_movq_M0_wRn(rd0);
2475 switch ((insn >> 22) & 3) {
2476 case 0:
2477 if (insn & (1 << 21))
2478 gen_op_iwmmxt_unpackhsb_M0();
2479 else
2480 gen_op_iwmmxt_unpackhub_M0();
2481 break;
2482 case 1:
2483 if (insn & (1 << 21))
2484 gen_op_iwmmxt_unpackhsw_M0();
2485 else
2486 gen_op_iwmmxt_unpackhuw_M0();
2487 break;
2488 case 2:
2489 if (insn & (1 << 21))
2490 gen_op_iwmmxt_unpackhsl_M0();
2491 else
2492 gen_op_iwmmxt_unpackhul_M0();
2493 break;
2494 case 3:
2495 return 1;
2496 }
2497 gen_op_iwmmxt_movq_wRn_M0(wrd);
2498 gen_op_iwmmxt_set_mup();
2499 gen_op_iwmmxt_set_cup();
2500 break;
d00584b7 2501 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
18c9b560 2502 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2503 if (((insn >> 22) & 3) == 0)
2504 return 1;
18c9b560
AZ
2505 wrd = (insn >> 12) & 0xf;
2506 rd0 = (insn >> 16) & 0xf;
2507 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2508 tmp = tcg_temp_new_i32();
da6b5335 2509 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2510 tcg_temp_free_i32(tmp);
18c9b560 2511 return 1;
da6b5335 2512 }
18c9b560 2513 switch ((insn >> 22) & 3) {
18c9b560 2514 case 1:
477955bd 2515 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2516 break;
2517 case 2:
477955bd 2518 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2519 break;
2520 case 3:
477955bd 2521 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2522 break;
2523 }
7d1b0095 2524 tcg_temp_free_i32(tmp);
18c9b560
AZ
2525 gen_op_iwmmxt_movq_wRn_M0(wrd);
2526 gen_op_iwmmxt_set_mup();
2527 gen_op_iwmmxt_set_cup();
2528 break;
d00584b7 2529 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
18c9b560 2530 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2531 if (((insn >> 22) & 3) == 0)
2532 return 1;
18c9b560
AZ
2533 wrd = (insn >> 12) & 0xf;
2534 rd0 = (insn >> 16) & 0xf;
2535 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2536 tmp = tcg_temp_new_i32();
da6b5335 2537 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2538 tcg_temp_free_i32(tmp);
18c9b560 2539 return 1;
da6b5335 2540 }
18c9b560 2541 switch ((insn >> 22) & 3) {
18c9b560 2542 case 1:
477955bd 2543 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2544 break;
2545 case 2:
477955bd 2546 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2547 break;
2548 case 3:
477955bd 2549 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2550 break;
2551 }
7d1b0095 2552 tcg_temp_free_i32(tmp);
18c9b560
AZ
2553 gen_op_iwmmxt_movq_wRn_M0(wrd);
2554 gen_op_iwmmxt_set_mup();
2555 gen_op_iwmmxt_set_cup();
2556 break;
d00584b7 2557 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
18c9b560 2558 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2559 if (((insn >> 22) & 3) == 0)
2560 return 1;
18c9b560
AZ
2561 wrd = (insn >> 12) & 0xf;
2562 rd0 = (insn >> 16) & 0xf;
2563 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2564 tmp = tcg_temp_new_i32();
da6b5335 2565 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2566 tcg_temp_free_i32(tmp);
18c9b560 2567 return 1;
da6b5335 2568 }
18c9b560 2569 switch ((insn >> 22) & 3) {
18c9b560 2570 case 1:
477955bd 2571 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2572 break;
2573 case 2:
477955bd 2574 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2575 break;
2576 case 3:
477955bd 2577 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2578 break;
2579 }
7d1b0095 2580 tcg_temp_free_i32(tmp);
18c9b560
AZ
2581 gen_op_iwmmxt_movq_wRn_M0(wrd);
2582 gen_op_iwmmxt_set_mup();
2583 gen_op_iwmmxt_set_cup();
2584 break;
d00584b7 2585 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
18c9b560 2586 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2587 if (((insn >> 22) & 3) == 0)
2588 return 1;
18c9b560
AZ
2589 wrd = (insn >> 12) & 0xf;
2590 rd0 = (insn >> 16) & 0xf;
2591 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2592 tmp = tcg_temp_new_i32();
18c9b560 2593 switch ((insn >> 22) & 3) {
18c9b560 2594 case 1:
da6b5335 2595 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2596 tcg_temp_free_i32(tmp);
18c9b560 2597 return 1;
da6b5335 2598 }
477955bd 2599 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2600 break;
2601 case 2:
da6b5335 2602 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2603 tcg_temp_free_i32(tmp);
18c9b560 2604 return 1;
da6b5335 2605 }
477955bd 2606 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2607 break;
2608 case 3:
da6b5335 2609 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2610 tcg_temp_free_i32(tmp);
18c9b560 2611 return 1;
da6b5335 2612 }
477955bd 2613 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2614 break;
2615 }
7d1b0095 2616 tcg_temp_free_i32(tmp);
18c9b560
AZ
2617 gen_op_iwmmxt_movq_wRn_M0(wrd);
2618 gen_op_iwmmxt_set_mup();
2619 gen_op_iwmmxt_set_cup();
2620 break;
d00584b7 2621 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
18c9b560
AZ
2622 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2623 wrd = (insn >> 12) & 0xf;
2624 rd0 = (insn >> 16) & 0xf;
2625 rd1 = (insn >> 0) & 0xf;
2626 gen_op_iwmmxt_movq_M0_wRn(rd0);
2627 switch ((insn >> 22) & 3) {
2628 case 0:
2629 if (insn & (1 << 21))
2630 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2631 else
2632 gen_op_iwmmxt_minub_M0_wRn(rd1);
2633 break;
2634 case 1:
2635 if (insn & (1 << 21))
2636 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2637 else
2638 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2639 break;
2640 case 2:
2641 if (insn & (1 << 21))
2642 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2643 else
2644 gen_op_iwmmxt_minul_M0_wRn(rd1);
2645 break;
2646 case 3:
2647 return 1;
2648 }
2649 gen_op_iwmmxt_movq_wRn_M0(wrd);
2650 gen_op_iwmmxt_set_mup();
2651 break;
d00584b7 2652 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
18c9b560
AZ
2653 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2654 wrd = (insn >> 12) & 0xf;
2655 rd0 = (insn >> 16) & 0xf;
2656 rd1 = (insn >> 0) & 0xf;
2657 gen_op_iwmmxt_movq_M0_wRn(rd0);
2658 switch ((insn >> 22) & 3) {
2659 case 0:
2660 if (insn & (1 << 21))
2661 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2662 else
2663 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2664 break;
2665 case 1:
2666 if (insn & (1 << 21))
2667 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2668 else
2669 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2670 break;
2671 case 2:
2672 if (insn & (1 << 21))
2673 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2674 else
2675 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2676 break;
2677 case 3:
2678 return 1;
2679 }
2680 gen_op_iwmmxt_movq_wRn_M0(wrd);
2681 gen_op_iwmmxt_set_mup();
2682 break;
d00584b7 2683 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
18c9b560
AZ
2684 case 0x402: case 0x502: case 0x602: case 0x702:
2685 wrd = (insn >> 12) & 0xf;
2686 rd0 = (insn >> 16) & 0xf;
2687 rd1 = (insn >> 0) & 0xf;
2688 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2689 tmp = tcg_const_i32((insn >> 20) & 3);
2690 iwmmxt_load_reg(cpu_V1, rd1);
2691 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2692 tcg_temp_free_i32(tmp);
18c9b560
AZ
2693 gen_op_iwmmxt_movq_wRn_M0(wrd);
2694 gen_op_iwmmxt_set_mup();
2695 break;
d00584b7 2696 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
18c9b560
AZ
2697 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2698 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2699 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2700 wrd = (insn >> 12) & 0xf;
2701 rd0 = (insn >> 16) & 0xf;
2702 rd1 = (insn >> 0) & 0xf;
2703 gen_op_iwmmxt_movq_M0_wRn(rd0);
2704 switch ((insn >> 20) & 0xf) {
2705 case 0x0:
2706 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2707 break;
2708 case 0x1:
2709 gen_op_iwmmxt_subub_M0_wRn(rd1);
2710 break;
2711 case 0x3:
2712 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2713 break;
2714 case 0x4:
2715 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2716 break;
2717 case 0x5:
2718 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2719 break;
2720 case 0x7:
2721 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2722 break;
2723 case 0x8:
2724 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2725 break;
2726 case 0x9:
2727 gen_op_iwmmxt_subul_M0_wRn(rd1);
2728 break;
2729 case 0xb:
2730 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2731 break;
2732 default:
2733 return 1;
2734 }
2735 gen_op_iwmmxt_movq_wRn_M0(wrd);
2736 gen_op_iwmmxt_set_mup();
2737 gen_op_iwmmxt_set_cup();
2738 break;
d00584b7 2739 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
18c9b560
AZ
2740 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2741 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2742 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2743 wrd = (insn >> 12) & 0xf;
2744 rd0 = (insn >> 16) & 0xf;
2745 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2746 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2747 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2748 tcg_temp_free_i32(tmp);
18c9b560
AZ
2749 gen_op_iwmmxt_movq_wRn_M0(wrd);
2750 gen_op_iwmmxt_set_mup();
2751 gen_op_iwmmxt_set_cup();
2752 break;
d00584b7 2753 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
18c9b560
AZ
2754 case 0x418: case 0x518: case 0x618: case 0x718:
2755 case 0x818: case 0x918: case 0xa18: case 0xb18:
2756 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2757 wrd = (insn >> 12) & 0xf;
2758 rd0 = (insn >> 16) & 0xf;
2759 rd1 = (insn >> 0) & 0xf;
2760 gen_op_iwmmxt_movq_M0_wRn(rd0);
2761 switch ((insn >> 20) & 0xf) {
2762 case 0x0:
2763 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2764 break;
2765 case 0x1:
2766 gen_op_iwmmxt_addub_M0_wRn(rd1);
2767 break;
2768 case 0x3:
2769 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2770 break;
2771 case 0x4:
2772 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2773 break;
2774 case 0x5:
2775 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2776 break;
2777 case 0x7:
2778 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2779 break;
2780 case 0x8:
2781 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2782 break;
2783 case 0x9:
2784 gen_op_iwmmxt_addul_M0_wRn(rd1);
2785 break;
2786 case 0xb:
2787 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2788 break;
2789 default:
2790 return 1;
2791 }
2792 gen_op_iwmmxt_movq_wRn_M0(wrd);
2793 gen_op_iwmmxt_set_mup();
2794 gen_op_iwmmxt_set_cup();
2795 break;
d00584b7 2796 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
18c9b560
AZ
2797 case 0x408: case 0x508: case 0x608: case 0x708:
2798 case 0x808: case 0x908: case 0xa08: case 0xb08:
2799 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2800 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2801 return 1;
18c9b560
AZ
2802 wrd = (insn >> 12) & 0xf;
2803 rd0 = (insn >> 16) & 0xf;
2804 rd1 = (insn >> 0) & 0xf;
2805 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2806 switch ((insn >> 22) & 3) {
18c9b560
AZ
2807 case 1:
2808 if (insn & (1 << 21))
2809 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2810 else
2811 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2812 break;
2813 case 2:
2814 if (insn & (1 << 21))
2815 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2816 else
2817 gen_op_iwmmxt_packul_M0_wRn(rd1);
2818 break;
2819 case 3:
2820 if (insn & (1 << 21))
2821 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2822 else
2823 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2824 break;
2825 }
2826 gen_op_iwmmxt_movq_wRn_M0(wrd);
2827 gen_op_iwmmxt_set_mup();
2828 gen_op_iwmmxt_set_cup();
2829 break;
2830 case 0x201: case 0x203: case 0x205: case 0x207:
2831 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2832 case 0x211: case 0x213: case 0x215: case 0x217:
2833 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2834 wrd = (insn >> 5) & 0xf;
2835 rd0 = (insn >> 12) & 0xf;
2836 rd1 = (insn >> 0) & 0xf;
2837 if (rd0 == 0xf || rd1 == 0xf)
2838 return 1;
2839 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2840 tmp = load_reg(s, rd0);
2841 tmp2 = load_reg(s, rd1);
18c9b560 2842 switch ((insn >> 16) & 0xf) {
d00584b7 2843 case 0x0: /* TMIA */
da6b5335 2844 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2845 break;
d00584b7 2846 case 0x8: /* TMIAPH */
da6b5335 2847 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2848 break;
d00584b7 2849 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2850 if (insn & (1 << 16))
da6b5335 2851 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2852 if (insn & (1 << 17))
da6b5335
FN
2853 tcg_gen_shri_i32(tmp2, tmp2, 16);
2854 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2855 break;
2856 default:
7d1b0095
PM
2857 tcg_temp_free_i32(tmp2);
2858 tcg_temp_free_i32(tmp);
18c9b560
AZ
2859 return 1;
2860 }
7d1b0095
PM
2861 tcg_temp_free_i32(tmp2);
2862 tcg_temp_free_i32(tmp);
18c9b560
AZ
2863 gen_op_iwmmxt_movq_wRn_M0(wrd);
2864 gen_op_iwmmxt_set_mup();
2865 break;
2866 default:
2867 return 1;
2868 }
2869
2870 return 0;
2871}
2872
a1c7273b 2873/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2874 (ie. an undefined instruction). */
7dcc1f89 2875static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2876{
2877 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2878 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2879
2880 if ((insn & 0x0ff00f10) == 0x0e200010) {
2881 /* Multiply with Internal Accumulate Format */
2882 rd0 = (insn >> 12) & 0xf;
2883 rd1 = insn & 0xf;
2884 acc = (insn >> 5) & 7;
2885
2886 if (acc != 0)
2887 return 1;
2888
3a554c0f
FN
2889 tmp = load_reg(s, rd0);
2890 tmp2 = load_reg(s, rd1);
18c9b560 2891 switch ((insn >> 16) & 0xf) {
d00584b7 2892 case 0x0: /* MIA */
3a554c0f 2893 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2894 break;
d00584b7 2895 case 0x8: /* MIAPH */
3a554c0f 2896 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2897 break;
d00584b7
PM
2898 case 0xc: /* MIABB */
2899 case 0xd: /* MIABT */
2900 case 0xe: /* MIATB */
2901 case 0xf: /* MIATT */
18c9b560 2902 if (insn & (1 << 16))
3a554c0f 2903 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2904 if (insn & (1 << 17))
3a554c0f
FN
2905 tcg_gen_shri_i32(tmp2, tmp2, 16);
2906 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2907 break;
2908 default:
2909 return 1;
2910 }
7d1b0095
PM
2911 tcg_temp_free_i32(tmp2);
2912 tcg_temp_free_i32(tmp);
18c9b560
AZ
2913
2914 gen_op_iwmmxt_movq_wRn_M0(acc);
2915 return 0;
2916 }
2917
2918 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2919 /* Internal Accumulator Access Format */
2920 rdhi = (insn >> 16) & 0xf;
2921 rdlo = (insn >> 12) & 0xf;
2922 acc = insn & 7;
2923
2924 if (acc != 0)
2925 return 1;
2926
d00584b7 2927 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2928 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2929 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2930 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2931 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2932 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
d00584b7 2933 } else { /* MAR */
3a554c0f
FN
2934 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2935 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2936 }
2937 return 0;
2938 }
2939
2940 return 1;
2941}
2942
/* Shift helper supporting a negative count: a negative n shifts left
 * by -n instead of right by n.
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number: four "big" bits from the
 * instruction form bits [4:1] of the register number, and one separately
 * located "small" bit supplies bit 0.
 */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number into 'reg'.  Pre-VFP3 cores
 * only have 16 D registers, so there the "small" (high) bit must be zero;
 * otherwise the macro makes the caller return 1 (UNDEF).
 * NOTE: relies on 's' (the DisasContext) being in scope at the use site.
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the D, N and M register operands.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2962
4373f3ce 2963/* Move between integer and VFP cores. */
39d5492a 2964static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2965{
39d5492a 2966 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2967 tcg_gen_mov_i32(tmp, cpu_F0s);
2968 return tmp;
2969}
2970
39d5492a 2971static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2972{
2973 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2974 tcg_temp_free_i32(tmp);
4373f3ce
PB
2975}
2976
39d5492a 2977static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2978{
39d5492a 2979 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2980 if (shift)
2981 tcg_gen_shri_i32(var, var, shift);
86831435 2982 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2983 tcg_gen_shli_i32(tmp, var, 8);
2984 tcg_gen_or_i32(var, var, tmp);
2985 tcg_gen_shli_i32(tmp, var, 16);
2986 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2987 tcg_temp_free_i32(tmp);
ad69471c
PB
2988}
2989
39d5492a 2990static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2991{
39d5492a 2992 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2993 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2994 tcg_gen_shli_i32(tmp, var, 16);
2995 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2996 tcg_temp_free_i32(tmp);
ad69471c
PB
2997}
2998
39d5492a 2999static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 3000{
39d5492a 3001 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
3002 tcg_gen_andi_i32(var, var, 0xffff0000);
3003 tcg_gen_shri_i32(tmp, var, 16);
3004 tcg_gen_or_i32(var, var, tmp);
7d1b0095 3005 tcg_temp_free_i32(tmp);
ad69471c
PB
3006}
3007
39d5492a 3008static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
3009{
3010 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 3011 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
3012 switch (size) {
3013 case 0:
12dcc321 3014 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
3015 gen_neon_dup_u8(tmp, 0);
3016 break;
3017 case 1:
12dcc321 3018 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
3019 gen_neon_dup_low16(tmp);
3020 break;
3021 case 2:
12dcc321 3022 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
3023 break;
3024 default: /* Avoid compiler warnings. */
3025 abort();
3026 }
3027 return tmp;
3028}
3029
04731fb5
WN
3030static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
3031 uint32_t dp)
3032{
3033 uint32_t cc = extract32(insn, 20, 2);
3034
3035 if (dp) {
3036 TCGv_i64 frn, frm, dest;
3037 TCGv_i64 tmp, zero, zf, nf, vf;
3038
3039 zero = tcg_const_i64(0);
3040
3041 frn = tcg_temp_new_i64();
3042 frm = tcg_temp_new_i64();
3043 dest = tcg_temp_new_i64();
3044
3045 zf = tcg_temp_new_i64();
3046 nf = tcg_temp_new_i64();
3047 vf = tcg_temp_new_i64();
3048
3049 tcg_gen_extu_i32_i64(zf, cpu_ZF);
3050 tcg_gen_ext_i32_i64(nf, cpu_NF);
3051 tcg_gen_ext_i32_i64(vf, cpu_VF);
3052
3053 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3054 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3055 switch (cc) {
3056 case 0: /* eq: Z */
3057 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
3058 frn, frm);
3059 break;
3060 case 1: /* vs: V */
3061 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
3062 frn, frm);
3063 break;
3064 case 2: /* ge: N == V -> N ^ V == 0 */
3065 tmp = tcg_temp_new_i64();
3066 tcg_gen_xor_i64(tmp, vf, nf);
3067 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3068 frn, frm);
3069 tcg_temp_free_i64(tmp);
3070 break;
3071 case 3: /* gt: !Z && N == V */
3072 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
3073 frn, frm);
3074 tmp = tcg_temp_new_i64();
3075 tcg_gen_xor_i64(tmp, vf, nf);
3076 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3077 dest, frm);
3078 tcg_temp_free_i64(tmp);
3079 break;
3080 }
3081 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3082 tcg_temp_free_i64(frn);
3083 tcg_temp_free_i64(frm);
3084 tcg_temp_free_i64(dest);
3085
3086 tcg_temp_free_i64(zf);
3087 tcg_temp_free_i64(nf);
3088 tcg_temp_free_i64(vf);
3089
3090 tcg_temp_free_i64(zero);
3091 } else {
3092 TCGv_i32 frn, frm, dest;
3093 TCGv_i32 tmp, zero;
3094
3095 zero = tcg_const_i32(0);
3096
3097 frn = tcg_temp_new_i32();
3098 frm = tcg_temp_new_i32();
3099 dest = tcg_temp_new_i32();
3100 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3101 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3102 switch (cc) {
3103 case 0: /* eq: Z */
3104 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3105 frn, frm);
3106 break;
3107 case 1: /* vs: V */
3108 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3109 frn, frm);
3110 break;
3111 case 2: /* ge: N == V -> N ^ V == 0 */
3112 tmp = tcg_temp_new_i32();
3113 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3114 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3115 frn, frm);
3116 tcg_temp_free_i32(tmp);
3117 break;
3118 case 3: /* gt: !Z && N == V */
3119 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3120 frn, frm);
3121 tmp = tcg_temp_new_i32();
3122 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3123 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3124 dest, frm);
3125 tcg_temp_free_i32(tmp);
3126 break;
3127 }
3128 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3129 tcg_temp_free_i32(frn);
3130 tcg_temp_free_i32(frm);
3131 tcg_temp_free_i32(dest);
3132
3133 tcg_temp_free_i32(zero);
3134 }
3135
3136 return 0;
3137}
3138
40cfacdd
WN
3139static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3140 uint32_t rm, uint32_t dp)
3141{
3142 uint32_t vmin = extract32(insn, 6, 1);
3143 TCGv_ptr fpst = get_fpstatus_ptr(0);
3144
3145 if (dp) {
3146 TCGv_i64 frn, frm, dest;
3147
3148 frn = tcg_temp_new_i64();
3149 frm = tcg_temp_new_i64();
3150 dest = tcg_temp_new_i64();
3151
3152 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3153 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3154 if (vmin) {
f71a2ae5 3155 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3156 } else {
f71a2ae5 3157 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3158 }
3159 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3160 tcg_temp_free_i64(frn);
3161 tcg_temp_free_i64(frm);
3162 tcg_temp_free_i64(dest);
3163 } else {
3164 TCGv_i32 frn, frm, dest;
3165
3166 frn = tcg_temp_new_i32();
3167 frm = tcg_temp_new_i32();
3168 dest = tcg_temp_new_i32();
3169
3170 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3171 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3172 if (vmin) {
f71a2ae5 3173 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3174 } else {
f71a2ae5 3175 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3176 }
3177 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3178 tcg_temp_free_i32(frn);
3179 tcg_temp_free_i32(frm);
3180 tcg_temp_free_i32(dest);
3181 }
3182
3183 tcg_temp_free_ptr(fpst);
3184 return 0;
3185}
3186
7655f39b
WN
3187static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3188 int rounding)
3189{
3190 TCGv_ptr fpst = get_fpstatus_ptr(0);
3191 TCGv_i32 tcg_rmode;
3192
3193 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
9b049916 3194 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
7655f39b
WN
3195
3196 if (dp) {
3197 TCGv_i64 tcg_op;
3198 TCGv_i64 tcg_res;
3199 tcg_op = tcg_temp_new_i64();
3200 tcg_res = tcg_temp_new_i64();
3201 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3202 gen_helper_rintd(tcg_res, tcg_op, fpst);
3203 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3204 tcg_temp_free_i64(tcg_op);
3205 tcg_temp_free_i64(tcg_res);
3206 } else {
3207 TCGv_i32 tcg_op;
3208 TCGv_i32 tcg_res;
3209 tcg_op = tcg_temp_new_i32();
3210 tcg_res = tcg_temp_new_i32();
3211 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3212 gen_helper_rints(tcg_res, tcg_op, fpst);
3213 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3214 tcg_temp_free_i32(tcg_op);
3215 tcg_temp_free_i32(tcg_res);
3216 }
3217
9b049916 3218 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
7655f39b
WN
3219 tcg_temp_free_i32(tcg_rmode);
3220
3221 tcg_temp_free_ptr(fpst);
3222 return 0;
3223}
3224
/* Handle VCVT{A,N,P,M}: float-to-integer conversion with the rounding
 * mode encoded in the insn (bit 7 selects signed vs unsigned result).
 * The integer result always lands in a single-precision register.
 * Always returns 0.
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Zero fixed-point fraction bits: this is a plain conversion.  */
    tcg_shift = tcg_const_i32(0);

    /* set_rmode swaps in the requested mode and hands back the old one,
     * so the second call below restores FPSCR's rounding mode.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* Only the low 32 bits of the helper result are the integer.  */
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the original FPSCR rounding mode.  */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
3282
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,  /* RM field 0b00 */
    FPROUNDING_TIEEVEN,  /* RM field 0b01 */
    FPROUNDING_POSINF,   /* RM field 0b10 */
    FPROUNDING_NEGINF,   /* RM field 0b11 */
};
3293
7dcc1f89 3294static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3295{
3296 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3297
d614a513 3298 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3299 return 1;
3300 }
3301
3302 if (dp) {
3303 VFP_DREG_D(rd, insn);
3304 VFP_DREG_N(rn, insn);
3305 VFP_DREG_M(rm, insn);
3306 } else {
3307 rd = VFP_SREG_D(insn);
3308 rn = VFP_SREG_N(insn);
3309 rm = VFP_SREG_M(insn);
3310 }
3311
3312 if ((insn & 0x0f800e50) == 0x0e000a00) {
3313 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3314 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3315 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3316 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3317 /* VRINTA, VRINTN, VRINTP, VRINTM */
3318 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3319 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3320 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3321 /* VCVTA, VCVTN, VCVTP, VCVTM */
3322 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3323 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3324 }
3325 return 1;
3326}
3327
a1c7273b 3328/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3329 (ie. an undefined instruction). */
7dcc1f89 3330static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3331{
3332 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3333 int dp, veclen;
39d5492a
PM
3334 TCGv_i32 addr;
3335 TCGv_i32 tmp;
3336 TCGv_i32 tmp2;
b7bcbe95 3337
d614a513 3338 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3339 return 1;
d614a513 3340 }
40f137e1 3341
2c7ffc41
PM
3342 /* FIXME: this access check should not take precedence over UNDEF
3343 * for invalid encodings; we will generate incorrect syndrome information
3344 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3345 */
9dbbc748 3346 if (s->fp_excp_el) {
2c7ffc41 3347 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3348 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3349 return 0;
3350 }
3351
5df8bac1 3352 if (!s->vfp_enabled) {
9ee6e8bb 3353 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3354 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3355 return 1;
3356 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3357 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3358 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3359 return 1;
a50c0f51 3360 }
40f137e1 3361 }
6a57f3eb
WN
3362
3363 if (extract32(insn, 28, 4) == 0xf) {
3364 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3365 * only used in v8 and above.
3366 */
7dcc1f89 3367 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3368 }
3369
b7bcbe95
FB
3370 dp = ((insn & 0xf00) == 0xb00);
3371 switch ((insn >> 24) & 0xf) {
3372 case 0xe:
3373 if (insn & (1 << 4)) {
3374 /* single register transfer */
b7bcbe95
FB
3375 rd = (insn >> 12) & 0xf;
3376 if (dp) {
9ee6e8bb
PB
3377 int size;
3378 int pass;
3379
3380 VFP_DREG_N(rn, insn);
3381 if (insn & 0xf)
b7bcbe95 3382 return 1;
9ee6e8bb 3383 if (insn & 0x00c00060
d614a513 3384 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3385 return 1;
d614a513 3386 }
9ee6e8bb
PB
3387
3388 pass = (insn >> 21) & 1;
3389 if (insn & (1 << 22)) {
3390 size = 0;
3391 offset = ((insn >> 5) & 3) * 8;
3392 } else if (insn & (1 << 5)) {
3393 size = 1;
3394 offset = (insn & (1 << 6)) ? 16 : 0;
3395 } else {
3396 size = 2;
3397 offset = 0;
3398 }
18c9b560 3399 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3400 /* vfp->arm */
ad69471c 3401 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3402 switch (size) {
3403 case 0:
9ee6e8bb 3404 if (offset)
ad69471c 3405 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3406 if (insn & (1 << 23))
ad69471c 3407 gen_uxtb(tmp);
9ee6e8bb 3408 else
ad69471c 3409 gen_sxtb(tmp);
9ee6e8bb
PB
3410 break;
3411 case 1:
9ee6e8bb
PB
3412 if (insn & (1 << 23)) {
3413 if (offset) {
ad69471c 3414 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3415 } else {
ad69471c 3416 gen_uxth(tmp);
9ee6e8bb
PB
3417 }
3418 } else {
3419 if (offset) {
ad69471c 3420 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3421 } else {
ad69471c 3422 gen_sxth(tmp);
9ee6e8bb
PB
3423 }
3424 }
3425 break;
3426 case 2:
9ee6e8bb
PB
3427 break;
3428 }
ad69471c 3429 store_reg(s, rd, tmp);
b7bcbe95
FB
3430 } else {
3431 /* arm->vfp */
ad69471c 3432 tmp = load_reg(s, rd);
9ee6e8bb
PB
3433 if (insn & (1 << 23)) {
3434 /* VDUP */
3435 if (size == 0) {
ad69471c 3436 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3437 } else if (size == 1) {
ad69471c 3438 gen_neon_dup_low16(tmp);
9ee6e8bb 3439 }
cbbccffc 3440 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3441 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3442 tcg_gen_mov_i32(tmp2, tmp);
3443 neon_store_reg(rn, n, tmp2);
3444 }
3445 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3446 } else {
3447 /* VMOV */
3448 switch (size) {
3449 case 0:
ad69471c 3450 tmp2 = neon_load_reg(rn, pass);
d593c48e 3451 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3452 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3453 break;
3454 case 1:
ad69471c 3455 tmp2 = neon_load_reg(rn, pass);
d593c48e 3456 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3457 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3458 break;
3459 case 2:
9ee6e8bb
PB
3460 break;
3461 }
ad69471c 3462 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3463 }
b7bcbe95 3464 }
9ee6e8bb
PB
3465 } else { /* !dp */
3466 if ((insn & 0x6f) != 0x00)
3467 return 1;
3468 rn = VFP_SREG_N(insn);
18c9b560 3469 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3470 /* vfp->arm */
3471 if (insn & (1 << 21)) {
3472 /* system register */
40f137e1 3473 rn >>= 1;
9ee6e8bb 3474
b7bcbe95 3475 switch (rn) {
40f137e1 3476 case ARM_VFP_FPSID:
4373f3ce 3477 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3478 VFP3 restricts all id registers to privileged
3479 accesses. */
3480 if (IS_USER(s)
d614a513 3481 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3482 return 1;
d614a513 3483 }
4373f3ce 3484 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3485 break;
40f137e1 3486 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3487 if (IS_USER(s))
3488 return 1;
4373f3ce 3489 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3490 break;
40f137e1
PB
3491 case ARM_VFP_FPINST:
3492 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3493 /* Not present in VFP3. */
3494 if (IS_USER(s)
d614a513 3495 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3496 return 1;
d614a513 3497 }
4373f3ce 3498 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3499 break;
40f137e1 3500 case ARM_VFP_FPSCR:
601d70b9 3501 if (rd == 15) {
4373f3ce
PB
3502 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3503 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3504 } else {
7d1b0095 3505 tmp = tcg_temp_new_i32();
4373f3ce
PB
3506 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3507 }
b7bcbe95 3508 break;
a50c0f51 3509 case ARM_VFP_MVFR2:
d614a513 3510 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3511 return 1;
3512 }
3513 /* fall through */
9ee6e8bb
PB
3514 case ARM_VFP_MVFR0:
3515 case ARM_VFP_MVFR1:
3516 if (IS_USER(s)
d614a513 3517 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3518 return 1;
d614a513 3519 }
4373f3ce 3520 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3521 break;
b7bcbe95
FB
3522 default:
3523 return 1;
3524 }
3525 } else {
3526 gen_mov_F0_vreg(0, rn);
4373f3ce 3527 tmp = gen_vfp_mrs();
b7bcbe95
FB
3528 }
3529 if (rd == 15) {
b5ff1b31 3530 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3531 gen_set_nzcv(tmp);
7d1b0095 3532 tcg_temp_free_i32(tmp);
4373f3ce
PB
3533 } else {
3534 store_reg(s, rd, tmp);
3535 }
b7bcbe95
FB
3536 } else {
3537 /* arm->vfp */
b7bcbe95 3538 if (insn & (1 << 21)) {
40f137e1 3539 rn >>= 1;
b7bcbe95
FB
3540 /* system register */
3541 switch (rn) {
40f137e1 3542 case ARM_VFP_FPSID:
9ee6e8bb
PB
3543 case ARM_VFP_MVFR0:
3544 case ARM_VFP_MVFR1:
b7bcbe95
FB
3545 /* Writes are ignored. */
3546 break;
40f137e1 3547 case ARM_VFP_FPSCR:
e4c1cfa5 3548 tmp = load_reg(s, rd);
4373f3ce 3549 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3550 tcg_temp_free_i32(tmp);
b5ff1b31 3551 gen_lookup_tb(s);
b7bcbe95 3552 break;
40f137e1 3553 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3554 if (IS_USER(s))
3555 return 1;
71b3c3de
JR
3556 /* TODO: VFP subarchitecture support.
3557 * For now, keep the EN bit only */
e4c1cfa5 3558 tmp = load_reg(s, rd);
71b3c3de 3559 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3560 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3561 gen_lookup_tb(s);
3562 break;
3563 case ARM_VFP_FPINST:
3564 case ARM_VFP_FPINST2:
23adb861
PM
3565 if (IS_USER(s)) {
3566 return 1;
3567 }
e4c1cfa5 3568 tmp = load_reg(s, rd);
4373f3ce 3569 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3570 break;
b7bcbe95
FB
3571 default:
3572 return 1;
3573 }
3574 } else {
e4c1cfa5 3575 tmp = load_reg(s, rd);
4373f3ce 3576 gen_vfp_msr(tmp);
b7bcbe95
FB
3577 gen_mov_vreg_F0(0, rn);
3578 }
3579 }
3580 }
3581 } else {
3582 /* data processing */
3583 /* The opcode is in bits 23, 21, 20 and 6. */
3584 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3585 if (dp) {
3586 if (op == 15) {
3587 /* rn is opcode */
3588 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3589 } else {
3590 /* rn is register number */
9ee6e8bb 3591 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3592 }
3593
239c20c7
WN
3594 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3595 ((rn & 0x1e) == 0x6))) {
3596 /* Integer or single/half precision destination. */
9ee6e8bb 3597 rd = VFP_SREG_D(insn);
b7bcbe95 3598 } else {
9ee6e8bb 3599 VFP_DREG_D(rd, insn);
b7bcbe95 3600 }
04595bf6 3601 if (op == 15 &&
239c20c7
WN
3602 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3603 ((rn & 0x1e) == 0x4))) {
3604 /* VCVT from int or half precision is always from S reg
3605 * regardless of dp bit. VCVT with immediate frac_bits
3606 * has same format as SREG_M.
04595bf6
PM
3607 */
3608 rm = VFP_SREG_M(insn);
b7bcbe95 3609 } else {
9ee6e8bb 3610 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3611 }
3612 } else {
9ee6e8bb 3613 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3614 if (op == 15 && rn == 15) {
3615 /* Double precision destination. */
9ee6e8bb
PB
3616 VFP_DREG_D(rd, insn);
3617 } else {
3618 rd = VFP_SREG_D(insn);
3619 }
04595bf6
PM
3620 /* NB that we implicitly rely on the encoding for the frac_bits
3621 * in VCVT of fixed to float being the same as that of an SREG_M
3622 */
9ee6e8bb 3623 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3624 }
3625
69d1fc22 3626 veclen = s->vec_len;
b7bcbe95
FB
3627 if (op == 15 && rn > 3)
3628 veclen = 0;
3629
3630 /* Shut up compiler warnings. */
3631 delta_m = 0;
3632 delta_d = 0;
3633 bank_mask = 0;
3b46e624 3634
b7bcbe95
FB
3635 if (veclen > 0) {
3636 if (dp)
3637 bank_mask = 0xc;
3638 else
3639 bank_mask = 0x18;
3640
3641 /* Figure out what type of vector operation this is. */
3642 if ((rd & bank_mask) == 0) {
3643 /* scalar */
3644 veclen = 0;
3645 } else {
3646 if (dp)
69d1fc22 3647 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3648 else
69d1fc22 3649 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3650
3651 if ((rm & bank_mask) == 0) {
3652 /* mixed scalar/vector */
3653 delta_m = 0;
3654 } else {
3655 /* vector */
3656 delta_m = delta_d;
3657 }
3658 }
3659 }
3660
3661 /* Load the initial operands. */
3662 if (op == 15) {
3663 switch (rn) {
3664 case 16:
3665 case 17:
3666 /* Integer source */
3667 gen_mov_F0_vreg(0, rm);
3668 break;
3669 case 8:
3670 case 9:
3671 /* Compare */
3672 gen_mov_F0_vreg(dp, rd);
3673 gen_mov_F1_vreg(dp, rm);
3674 break;
3675 case 10:
3676 case 11:
3677 /* Compare with zero */
3678 gen_mov_F0_vreg(dp, rd);
3679 gen_vfp_F1_ld0(dp);
3680 break;
9ee6e8bb
PB
3681 case 20:
3682 case 21:
3683 case 22:
3684 case 23:
644ad806
PB
3685 case 28:
3686 case 29:
3687 case 30:
3688 case 31:
9ee6e8bb
PB
3689 /* Source and destination the same. */
3690 gen_mov_F0_vreg(dp, rd);
3691 break;
6e0c0ed1
PM
3692 case 4:
3693 case 5:
3694 case 6:
3695 case 7:
239c20c7
WN
3696 /* VCVTB, VCVTT: only present with the halfprec extension
3697 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3698 * (we choose to UNDEF)
6e0c0ed1 3699 */
d614a513
PM
3700 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3701 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3702 return 1;
3703 }
239c20c7
WN
3704 if (!extract32(rn, 1, 1)) {
3705 /* Half precision source. */
3706 gen_mov_F0_vreg(0, rm);
3707 break;
3708 }
6e0c0ed1 3709 /* Otherwise fall through */
b7bcbe95
FB
3710 default:
3711 /* One source operand. */
3712 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3713 break;
b7bcbe95
FB
3714 }
3715 } else {
3716 /* Two source operands. */
3717 gen_mov_F0_vreg(dp, rn);
3718 gen_mov_F1_vreg(dp, rm);
3719 }
3720
3721 for (;;) {
3722 /* Perform the calculation. */
3723 switch (op) {
605a6aed
PM
3724 case 0: /* VMLA: fd + (fn * fm) */
3725 /* Note that order of inputs to the add matters for NaNs */
3726 gen_vfp_F1_mul(dp);
3727 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3728 gen_vfp_add(dp);
3729 break;
605a6aed 3730 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3731 gen_vfp_mul(dp);
605a6aed
PM
3732 gen_vfp_F1_neg(dp);
3733 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3734 gen_vfp_add(dp);
3735 break;
605a6aed
PM
3736 case 2: /* VNMLS: -fd + (fn * fm) */
3737 /* Note that it isn't valid to replace (-A + B) with (B - A)
3738 * or similar plausible looking simplifications
3739 * because this will give wrong results for NaNs.
3740 */
3741 gen_vfp_F1_mul(dp);
3742 gen_mov_F0_vreg(dp, rd);
3743 gen_vfp_neg(dp);
3744 gen_vfp_add(dp);
b7bcbe95 3745 break;
605a6aed 3746 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3747 gen_vfp_mul(dp);
605a6aed
PM
3748 gen_vfp_F1_neg(dp);
3749 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3750 gen_vfp_neg(dp);
605a6aed 3751 gen_vfp_add(dp);
b7bcbe95
FB
3752 break;
3753 case 4: /* mul: fn * fm */
3754 gen_vfp_mul(dp);
3755 break;
3756 case 5: /* nmul: -(fn * fm) */
3757 gen_vfp_mul(dp);
3758 gen_vfp_neg(dp);
3759 break;
3760 case 6: /* add: fn + fm */
3761 gen_vfp_add(dp);
3762 break;
3763 case 7: /* sub: fn - fm */
3764 gen_vfp_sub(dp);
3765 break;
3766 case 8: /* div: fn / fm */
3767 gen_vfp_div(dp);
3768 break;
da97f52c
PM
3769 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3770 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3771 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3772 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3773 /* These are fused multiply-add, and must be done as one
3774 * floating point operation with no rounding between the
3775 * multiplication and addition steps.
3776 * NB that doing the negations here as separate steps is
3777 * correct : an input NaN should come out with its sign bit
3778 * flipped if it is a negated-input.
3779 */
d614a513 3780 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3781 return 1;
3782 }
3783 if (dp) {
3784 TCGv_ptr fpst;
3785 TCGv_i64 frd;
3786 if (op & 1) {
3787 /* VFNMS, VFMS */
3788 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3789 }
3790 frd = tcg_temp_new_i64();
3791 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3792 if (op & 2) {
3793 /* VFNMA, VFNMS */
3794 gen_helper_vfp_negd(frd, frd);
3795 }
3796 fpst = get_fpstatus_ptr(0);
3797 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3798 cpu_F1d, frd, fpst);
3799 tcg_temp_free_ptr(fpst);
3800 tcg_temp_free_i64(frd);
3801 } else {
3802 TCGv_ptr fpst;
3803 TCGv_i32 frd;
3804 if (op & 1) {
3805 /* VFNMS, VFMS */
3806 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3807 }
3808 frd = tcg_temp_new_i32();
3809 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3810 if (op & 2) {
3811 gen_helper_vfp_negs(frd, frd);
3812 }
3813 fpst = get_fpstatus_ptr(0);
3814 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3815 cpu_F1s, frd, fpst);
3816 tcg_temp_free_ptr(fpst);
3817 tcg_temp_free_i32(frd);
3818 }
3819 break;
9ee6e8bb 3820 case 14: /* fconst */
d614a513
PM
3821 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3822 return 1;
3823 }
9ee6e8bb
PB
3824
3825 n = (insn << 12) & 0x80000000;
3826 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3827 if (dp) {
3828 if (i & 0x40)
3829 i |= 0x3f80;
3830 else
3831 i |= 0x4000;
3832 n |= i << 16;
4373f3ce 3833 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3834 } else {
3835 if (i & 0x40)
3836 i |= 0x780;
3837 else
3838 i |= 0x800;
3839 n |= i << 19;
5b340b51 3840 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3841 }
9ee6e8bb 3842 break;
b7bcbe95
FB
3843 case 15: /* extension space */
3844 switch (rn) {
3845 case 0: /* cpy */
3846 /* no-op */
3847 break;
3848 case 1: /* abs */
3849 gen_vfp_abs(dp);
3850 break;
3851 case 2: /* neg */
3852 gen_vfp_neg(dp);
3853 break;
3854 case 3: /* sqrt */
3855 gen_vfp_sqrt(dp);
3856 break;
239c20c7 3857 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
486624fc
AB
3858 {
3859 TCGv_ptr fpst = get_fpstatus_ptr(false);
3860 TCGv_i32 ahp_mode = get_ahp_flag();
60011498
PB
3861 tmp = gen_vfp_mrs();
3862 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3863 if (dp) {
3864 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 3865 fpst, ahp_mode);
239c20c7
WN
3866 } else {
3867 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 3868 fpst, ahp_mode);
239c20c7 3869 }
486624fc
AB
3870 tcg_temp_free_i32(ahp_mode);
3871 tcg_temp_free_ptr(fpst);
7d1b0095 3872 tcg_temp_free_i32(tmp);
60011498 3873 break;
486624fc 3874 }
239c20c7 3875 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
486624fc
AB
3876 {
3877 TCGv_ptr fpst = get_fpstatus_ptr(false);
3878 TCGv_i32 ahp = get_ahp_flag();
60011498
PB
3879 tmp = gen_vfp_mrs();
3880 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3881 if (dp) {
3882 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 3883 fpst, ahp);
239c20c7
WN
3884 } else {
3885 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 3886 fpst, ahp);
239c20c7 3887 }
7d1b0095 3888 tcg_temp_free_i32(tmp);
486624fc
AB
3889 tcg_temp_free_i32(ahp);
3890 tcg_temp_free_ptr(fpst);
60011498 3891 break;
486624fc 3892 }
239c20c7 3893 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
486624fc
AB
3894 {
3895 TCGv_ptr fpst = get_fpstatus_ptr(false);
3896 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 3897 tmp = tcg_temp_new_i32();
486624fc 3898
239c20c7
WN
3899 if (dp) {
3900 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 3901 fpst, ahp);
239c20c7
WN
3902 } else {
3903 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 3904 fpst, ahp);
239c20c7 3905 }
486624fc
AB
3906 tcg_temp_free_i32(ahp);
3907 tcg_temp_free_ptr(fpst);
60011498
PB
3908 gen_mov_F0_vreg(0, rd);
3909 tmp2 = gen_vfp_mrs();
3910 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3911 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3912 tcg_temp_free_i32(tmp2);
60011498
PB
3913 gen_vfp_msr(tmp);
3914 break;
486624fc 3915 }
239c20c7 3916 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
486624fc
AB
3917 {
3918 TCGv_ptr fpst = get_fpstatus_ptr(false);
3919 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 3920 tmp = tcg_temp_new_i32();
239c20c7
WN
3921 if (dp) {
3922 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 3923 fpst, ahp);
239c20c7
WN
3924 } else {
3925 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 3926 fpst, ahp);
239c20c7 3927 }
486624fc
AB
3928 tcg_temp_free_i32(ahp);
3929 tcg_temp_free_ptr(fpst);
60011498
PB
3930 tcg_gen_shli_i32(tmp, tmp, 16);
3931 gen_mov_F0_vreg(0, rd);
3932 tmp2 = gen_vfp_mrs();
3933 tcg_gen_ext16u_i32(tmp2, tmp2);
3934 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3935 tcg_temp_free_i32(tmp2);
60011498
PB
3936 gen_vfp_msr(tmp);
3937 break;
486624fc 3938 }
b7bcbe95
FB
3939 case 8: /* cmp */
3940 gen_vfp_cmp(dp);
3941 break;
3942 case 9: /* cmpe */
3943 gen_vfp_cmpe(dp);
3944 break;
3945 case 10: /* cmpz */
3946 gen_vfp_cmp(dp);
3947 break;
3948 case 11: /* cmpez */
3949 gen_vfp_F1_ld0(dp);
3950 gen_vfp_cmpe(dp);
3951 break;
664c6733
WN
3952 case 12: /* vrintr */
3953 {
3954 TCGv_ptr fpst = get_fpstatus_ptr(0);
3955 if (dp) {
3956 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3957 } else {
3958 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3959 }
3960 tcg_temp_free_ptr(fpst);
3961 break;
3962 }
a290c62a
WN
3963 case 13: /* vrintz */
3964 {
3965 TCGv_ptr fpst = get_fpstatus_ptr(0);
3966 TCGv_i32 tcg_rmode;
3967 tcg_rmode = tcg_const_i32(float_round_to_zero);
9b049916 3968 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
3969 if (dp) {
3970 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3971 } else {
3972 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3973 }
9b049916 3974 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
3975 tcg_temp_free_i32(tcg_rmode);
3976 tcg_temp_free_ptr(fpst);
3977 break;
3978 }
4e82bc01
WN
3979 case 14: /* vrintx */
3980 {
3981 TCGv_ptr fpst = get_fpstatus_ptr(0);
3982 if (dp) {
3983 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3984 } else {
3985 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3986 }
3987 tcg_temp_free_ptr(fpst);
3988 break;
3989 }
b7bcbe95
FB
3990 case 15: /* single<->double conversion */
3991 if (dp)
4373f3ce 3992 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3993 else
4373f3ce 3994 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3995 break;
3996 case 16: /* fuito */
5500b06c 3997 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3998 break;
3999 case 17: /* fsito */
5500b06c 4000 gen_vfp_sito(dp, 0);
b7bcbe95 4001 break;
9ee6e8bb 4002 case 20: /* fshto */
d614a513
PM
4003 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4004 return 1;
4005 }
5500b06c 4006 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
4007 break;
4008 case 21: /* fslto */
d614a513
PM
4009 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4010 return 1;
4011 }
5500b06c 4012 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
4013 break;
4014 case 22: /* fuhto */
d614a513
PM
4015 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4016 return 1;
4017 }
5500b06c 4018 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
4019 break;
4020 case 23: /* fulto */
d614a513
PM
4021 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4022 return 1;
4023 }
5500b06c 4024 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 4025 break;
b7bcbe95 4026 case 24: /* ftoui */
5500b06c 4027 gen_vfp_toui(dp, 0);
b7bcbe95
FB
4028 break;
4029 case 25: /* ftouiz */
5500b06c 4030 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
4031 break;
4032 case 26: /* ftosi */
5500b06c 4033 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
4034 break;
4035 case 27: /* ftosiz */
5500b06c 4036 gen_vfp_tosiz(dp, 0);
b7bcbe95 4037 break;
9ee6e8bb 4038 case 28: /* ftosh */
d614a513
PM
4039 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4040 return 1;
4041 }
5500b06c 4042 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
4043 break;
4044 case 29: /* ftosl */
d614a513
PM
4045 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4046 return 1;
4047 }
5500b06c 4048 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
4049 break;
4050 case 30: /* ftouh */
d614a513
PM
4051 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4052 return 1;
4053 }
5500b06c 4054 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
4055 break;
4056 case 31: /* ftoul */
d614a513
PM
4057 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4058 return 1;
4059 }
5500b06c 4060 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 4061 break;
b7bcbe95 4062 default: /* undefined */
b7bcbe95
FB
4063 return 1;
4064 }
4065 break;
4066 default: /* undefined */
b7bcbe95
FB
4067 return 1;
4068 }
4069
4070 /* Write back the result. */
239c20c7
WN
4071 if (op == 15 && (rn >= 8 && rn <= 11)) {
4072 /* Comparison, do nothing. */
4073 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
4074 (rn & 0x1e) == 0x6)) {
4075 /* VCVT double to int: always integer result.
4076 * VCVT double to half precision is always a single
4077 * precision result.
4078 */
b7bcbe95 4079 gen_mov_vreg_F0(0, rd);
239c20c7 4080 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
4081 /* conversion */
4082 gen_mov_vreg_F0(!dp, rd);
239c20c7 4083 } else {
b7bcbe95 4084 gen_mov_vreg_F0(dp, rd);
239c20c7 4085 }
b7bcbe95
FB
4086
4087 /* break out of the loop if we have finished */
4088 if (veclen == 0)
4089 break;
4090
4091 if (op == 15 && delta_m == 0) {
4092 /* single source one-many */
4093 while (veclen--) {
4094 rd = ((rd + delta_d) & (bank_mask - 1))
4095 | (rd & bank_mask);
4096 gen_mov_vreg_F0(dp, rd);
4097 }
4098 break;
4099 }
4100 /* Setup the next operands. */
4101 veclen--;
4102 rd = ((rd + delta_d) & (bank_mask - 1))
4103 | (rd & bank_mask);
4104
4105 if (op == 15) {
4106 /* One source operand. */
4107 rm = ((rm + delta_m) & (bank_mask - 1))
4108 | (rm & bank_mask);
4109 gen_mov_F0_vreg(dp, rm);
4110 } else {
4111 /* Two source operands. */
4112 rn = ((rn + delta_d) & (bank_mask - 1))
4113 | (rn & bank_mask);
4114 gen_mov_F0_vreg(dp, rn);
4115 if (delta_m) {
4116 rm = ((rm + delta_m) & (bank_mask - 1))
4117 | (rm & bank_mask);
4118 gen_mov_F1_vreg(dp, rm);
4119 }
4120 }
4121 }
4122 }
4123 break;
4124 case 0xc:
4125 case 0xd:
8387da81 4126 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
4127 /* two-register transfer */
4128 rn = (insn >> 16) & 0xf;
4129 rd = (insn >> 12) & 0xf;
4130 if (dp) {
9ee6e8bb
PB
4131 VFP_DREG_M(rm, insn);
4132 } else {
4133 rm = VFP_SREG_M(insn);
4134 }
b7bcbe95 4135
18c9b560 4136 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4137 /* vfp->arm */
4138 if (dp) {
4373f3ce
PB
4139 gen_mov_F0_vreg(0, rm * 2);
4140 tmp = gen_vfp_mrs();
4141 store_reg(s, rd, tmp);
4142 gen_mov_F0_vreg(0, rm * 2 + 1);
4143 tmp = gen_vfp_mrs();
4144 store_reg(s, rn, tmp);
b7bcbe95
FB
4145 } else {
4146 gen_mov_F0_vreg(0, rm);
4373f3ce 4147 tmp = gen_vfp_mrs();
8387da81 4148 store_reg(s, rd, tmp);
b7bcbe95 4149 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4150 tmp = gen_vfp_mrs();
8387da81 4151 store_reg(s, rn, tmp);
b7bcbe95
FB
4152 }
4153 } else {
4154 /* arm->vfp */
4155 if (dp) {
4373f3ce
PB
4156 tmp = load_reg(s, rd);
4157 gen_vfp_msr(tmp);
4158 gen_mov_vreg_F0(0, rm * 2);
4159 tmp = load_reg(s, rn);
4160 gen_vfp_msr(tmp);
4161 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4162 } else {
8387da81 4163 tmp = load_reg(s, rd);
4373f3ce 4164 gen_vfp_msr(tmp);
b7bcbe95 4165 gen_mov_vreg_F0(0, rm);
8387da81 4166 tmp = load_reg(s, rn);
4373f3ce 4167 gen_vfp_msr(tmp);
b7bcbe95
FB
4168 gen_mov_vreg_F0(0, rm + 1);
4169 }
4170 }
4171 } else {
4172 /* Load/store */
4173 rn = (insn >> 16) & 0xf;
4174 if (dp)
9ee6e8bb 4175 VFP_DREG_D(rd, insn);
b7bcbe95 4176 else
9ee6e8bb 4177 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4178 if ((insn & 0x01200000) == 0x01000000) {
4179 /* Single load/store */
4180 offset = (insn & 0xff) << 2;
4181 if ((insn & (1 << 23)) == 0)
4182 offset = -offset;
934814f1
PM
4183 if (s->thumb && rn == 15) {
4184 /* This is actually UNPREDICTABLE */
4185 addr = tcg_temp_new_i32();
4186 tcg_gen_movi_i32(addr, s->pc & ~2);
4187 } else {
4188 addr = load_reg(s, rn);
4189 }
312eea9f 4190 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4191 if (insn & (1 << 20)) {
312eea9f 4192 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4193 gen_mov_vreg_F0(dp, rd);
4194 } else {
4195 gen_mov_F0_vreg(dp, rd);
312eea9f 4196 gen_vfp_st(s, dp, addr);
b7bcbe95 4197 }
7d1b0095 4198 tcg_temp_free_i32(addr);
b7bcbe95
FB
4199 } else {
4200 /* load/store multiple */
934814f1 4201 int w = insn & (1 << 21);
b7bcbe95
FB
4202 if (dp)
4203 n = (insn >> 1) & 0x7f;
4204 else
4205 n = insn & 0xff;
4206
934814f1
PM
4207 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4208 /* P == U , W == 1 => UNDEF */
4209 return 1;
4210 }
4211 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4212 /* UNPREDICTABLE cases for bad immediates: we choose to
4213 * UNDEF to avoid generating huge numbers of TCG ops
4214 */
4215 return 1;
4216 }
4217 if (rn == 15 && w) {
4218 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4219 return 1;
4220 }
4221
4222 if (s->thumb && rn == 15) {
4223 /* This is actually UNPREDICTABLE */
4224 addr = tcg_temp_new_i32();
4225 tcg_gen_movi_i32(addr, s->pc & ~2);
4226 } else {
4227 addr = load_reg(s, rn);
4228 }
b7bcbe95 4229 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4230 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
4231
4232 if (dp)
4233 offset = 8;
4234 else
4235 offset = 4;
4236 for (i = 0; i < n; i++) {
18c9b560 4237 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4238 /* load */
312eea9f 4239 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4240 gen_mov_vreg_F0(dp, rd + i);
4241 } else {
4242 /* store */
4243 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4244 gen_vfp_st(s, dp, addr);
b7bcbe95 4245 }
312eea9f 4246 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4247 }
934814f1 4248 if (w) {
b7bcbe95
FB
4249 /* writeback */
4250 if (insn & (1 << 24))
4251 offset = -offset * n;
4252 else if (dp && (insn & 1))
4253 offset = 4;
4254 else
4255 offset = 0;
4256
4257 if (offset != 0)
312eea9f
FN
4258 tcg_gen_addi_i32(addr, addr, offset);
4259 store_reg(s, rn, addr);
4260 } else {
7d1b0095 4261 tcg_temp_free_i32(addr);
b7bcbe95
FB
4262 }
4263 }
4264 }
4265 break;
4266 default:
4267 /* Should never happen. */
4268 return 1;
4269 }
4270 return 0;
4271}
4272
/* Return true if a direct (chained) goto_tb to @dest is allowed.
 * For system emulation we may only chain to a destination on the same
 * guest page as the TB start or as the current instruction, since the
 * chain must be invalidated if that page is remapped.  For user-only
 * emulation there is no such restriction.
 */
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
6e256c93 4282
/* Emit a lookup of the next TB by CPU state, jumping to it directly
 * if found; otherwise control returns to the main loop.
 */
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}
4287
/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 *
 * @n is the TB-chaining slot index passed to tcg_gen_goto_tb()/
 * tcg_gen_exit_tb(); @dest is the guest PC to continue at.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        /* Same-page destination: chain directly to the next TB. */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        /* Cross-page: set the PC and do a dynamic TB lookup. */
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}
4304
8aaca4c0
FB
4305static inline void gen_jmp (DisasContext *s, uint32_t dest)
4306{
b636649f 4307 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4308 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4309 if (s->thumb)
d9ba4830
PB
4310 dest |= 1;
4311 gen_bx_im(s, dest);
8aaca4c0 4312 } else {
6e256c93 4313 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4314 }
4315}
4316
39d5492a 4317static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4318{
ee097184 4319 if (x)
d9ba4830 4320 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4321 else
d9ba4830 4322 gen_sxth(t0);
ee097184 4323 if (y)
d9ba4830 4324 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4325 else
d9ba4830
PB
4326 gen_sxth(t1);
4327 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4328}
4329
4330/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4331static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4332{
b5ff1b31
FB
4333 uint32_t mask;
4334
4335 mask = 0;
4336 if (flags & (1 << 0))
4337 mask |= 0xff;
4338 if (flags & (1 << 1))
4339 mask |= 0xff00;
4340 if (flags & (1 << 2))
4341 mask |= 0xff0000;
4342 if (flags & (1 << 3))
4343 mask |= 0xff000000;
9ee6e8bb 4344
2ae23e75 4345 /* Mask out undefined bits. */
9ee6e8bb 4346 mask &= ~CPSR_RESERVED;
d614a513 4347 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4348 mask &= ~CPSR_T;
d614a513
PM
4349 }
4350 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4351 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4352 }
4353 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4354 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4355 }
4356 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4357 mask &= ~CPSR_IT;
d614a513 4358 }
4051e12c
PM
4359 /* Mask out execution state and reserved bits. */
4360 if (!spsr) {
4361 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4362 }
b5ff1b31
FB
4363 /* Mask out privileged bits. */
4364 if (IS_USER(s))
9ee6e8bb 4365 mask &= CPSR_USER;
b5ff1b31
FB
4366 return mask;
4367}
4368
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode. */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: keep the SPSR bits outside @mask and
         * merge in the new bits from t0.
         */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* PSR writes may change execution state, so end this TB. */
    gen_lookup_tb(s);
    return 0;
}
4389}
4390
2fbac54b
FN
4391/* Returns nonzero if access to the PSR is not permitted. */
4392static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4393{
39d5492a 4394 TCGv_i32 tmp;
7d1b0095 4395 tmp = tcg_temp_new_i32();
2fbac54b
FN
4396 tcg_gen_movi_i32(tmp, val);
4397 return gen_set_psr(s, mask, spsr, tmp);
4398}
4399
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /*
         * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
         * (and so we can forbid accesses from EL2 or below). elr_hyp
         * can be accessed also from Hyp mode, so forbid accesses from
         * EL0 or EL1.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
            (s->current_el < 3 && *regno != 17)) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4548
4549static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4550{
4551 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4552 int tgtmode = 0, regno = 0;
4553
4554 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4555 return;
4556 }
4557
4558 /* Sync state because msr_banked() can raise exceptions */
4559 gen_set_condexec(s);
4560 gen_set_pc_im(s, s->pc - 4);
4561 tcg_reg = load_reg(s, rn);
4562 tcg_tgtmode = tcg_const_i32(tgtmode);
4563 tcg_regno = tcg_const_i32(regno);
4564 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4565 tcg_temp_free_i32(tcg_tgtmode);
4566 tcg_temp_free_i32(tcg_regno);
4567 tcg_temp_free_i32(tcg_reg);
dcba3a8d 4568 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4569}
4570
4571static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4572{
4573 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4574 int tgtmode = 0, regno = 0;
4575
4576 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4577 return;
4578 }
4579
4580 /* Sync state because mrs_banked() can raise exceptions */
4581 gen_set_condexec(s);
4582 gen_set_pc_im(s, s->pc - 4);
4583 tcg_reg = tcg_temp_new_i32();
4584 tcg_tgtmode = tcg_const_i32(tgtmode);
4585 tcg_regno = tcg_const_i32(regno);
4586 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4587 tcg_temp_free_i32(tcg_tgtmode);
4588 tcg_temp_free_i32(tcg_regno);
4589 store_reg(s, rn, tcg_reg);
dcba3a8d 4590 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4591}
4592
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc); /* pc is consumed; caller must not reuse it */
}
4602
/* Generate a v6 exception return.  Marks both values as dead. */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    /* Bracket the helper with gen_io_start/end when icount is in use,
     * since the CPSR write can have I/O-visible effects.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}
3b46e624 4622
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    /* Restore CPSR from the saved SPSR and continue at the new PC. */
    gen_rfe(s, pc, load_cpu_field(spsr));
}
4628
c22edfeb
AB
4629/*
4630 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4631 * only call the helper when running single threaded TCG code to ensure
4632 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4633 * just skip this instruction. Currently the SEV/SEVL instructions
4634 * which are *one* of many ways to wake the CPU from WFE are not
4635 * implemented so we can't sleep like WFI does.
4636 */
9ee6e8bb
PB
4637static void gen_nop_hint(DisasContext *s, int val)
4638{
4639 switch (val) {
2399d4e7
EC
4640 /* When running in MTTCG we don't generate jumps to the yield and
4641 * WFE helpers as it won't affect the scheduling of other vCPUs.
4642 * If we wanted to more completely model WFE/SEV so we don't busy
4643 * spin unnecessarily we would need to do something more involved.
4644 */
c87e5a61 4645 case 1: /* yield */
2399d4e7 4646 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4647 gen_set_pc_im(s, s->pc);
dcba3a8d 4648 s->base.is_jmp = DISAS_YIELD;
c22edfeb 4649 }
c87e5a61 4650 break;
9ee6e8bb 4651 case 3: /* wfi */
eaed129d 4652 gen_set_pc_im(s, s->pc);
dcba3a8d 4653 s->base.is_jmp = DISAS_WFI;
9ee6e8bb
PB
4654 break;
4655 case 2: /* wfe */
2399d4e7 4656 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4657 gen_set_pc_im(s, s->pc);
dcba3a8d 4658 s->base.is_jmp = DISAS_WFE;
c22edfeb 4659 }
72c1d3af 4660 break;
9ee6e8bb 4661 case 4: /* sev */
12b10571
MR
4662 case 5: /* sevl */
4663 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4664 default: /* nop */
4665 break;
4666 }
4667}
99c475ab 4668
ad69471c 4669#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4670
39d5492a 4671static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4672{
4673 switch (size) {
dd8fbd78
FN
4674 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4675 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4676 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4677 default: abort();
9ee6e8bb 4678 }
9ee6e8bb
PB
4679}
4680
39d5492a 4681static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4682{
4683 switch (size) {
dd8fbd78
FN
4684 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4685 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4686 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4687 default: return;
4688 }
4689}
4690
/* 32-bit pairwise ops end up the same as the elementwise versions. */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

/* Emit the NEON integer helper for 'name', selected by the local
 * variables 'size' (element size) and 'u' (unsigned flag) as
 * (size << 1) | u; inputs are 'tmp'/'tmp2', result in 'tmp'.
 * This variant passes cpu_env to the helper.  Expands to 'return 1'
 * (invalid insn) for unhandled size/u combinations.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take
 * cpu_env.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4742
39d5492a 4743static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4744{
39d5492a 4745 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4746 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4747 return tmp;
9ee6e8bb
PB
4748}
4749
/* Store @var to VFP scratch slot @scratch and free the temporary
 * (marks @var as dead).
 */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4755
39d5492a 4756static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4757{
39d5492a 4758 TCGv_i32 tmp;
9ee6e8bb 4759 if (size == 1) {
0fad6efc
PM
4760 tmp = neon_load_reg(reg & 7, reg >> 4);
4761 if (reg & 8) {
dd8fbd78 4762 gen_neon_dup_high16(tmp);
0fad6efc
PM
4763 } else {
4764 gen_neon_dup_low16(tmp);
dd8fbd78 4765 }
0fad6efc
PM
4766 } else {
4767 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4768 }
dd8fbd78 4769 return tmp;
9ee6e8bb
PB
4770}
4771
02acedf9 4772static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4773{
b13708bb
RH
4774 TCGv_ptr pd, pm;
4775
600b828c 4776 if (!q && size == 2) {
02acedf9
PM
4777 return 1;
4778 }
b13708bb
RH
4779 pd = vfp_reg_ptr(true, rd);
4780 pm = vfp_reg_ptr(true, rm);
02acedf9
PM
4781 if (q) {
4782 switch (size) {
4783 case 0:
b13708bb 4784 gen_helper_neon_qunzip8(pd, pm);
02acedf9
PM
4785 break;
4786 case 1:
b13708bb 4787 gen_helper_neon_qunzip16(pd, pm);
02acedf9
PM
4788 break;
4789 case 2:
b13708bb 4790 gen_helper_neon_qunzip32(pd, pm);
02acedf9
PM
4791 break;
4792 default:
4793 abort();
4794 }
4795 } else {
4796 switch (size) {
4797 case 0:
b13708bb 4798 gen_helper_neon_unzip8(pd, pm);
02acedf9
PM
4799 break;
4800 case 1:
b13708bb 4801 gen_helper_neon_unzip16(pd, pm);
02acedf9
PM
4802 break;
4803 default:
4804 abort();
4805 }
4806 }
b13708bb
RH
4807 tcg_temp_free_ptr(pd);
4808 tcg_temp_free_ptr(pm);
02acedf9 4809 return 0;
19457615
FN
4810}
4811
d68a6f3a 4812static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4813{
b13708bb
RH
4814 TCGv_ptr pd, pm;
4815
600b828c 4816 if (!q && size == 2) {
d68a6f3a
PM
4817 return 1;
4818 }
b13708bb
RH
4819 pd = vfp_reg_ptr(true, rd);
4820 pm = vfp_reg_ptr(true, rm);
d68a6f3a
PM
4821 if (q) {
4822 switch (size) {
4823 case 0:
b13708bb 4824 gen_helper_neon_qzip8(pd, pm);
d68a6f3a
PM
4825 break;
4826 case 1:
b13708bb 4827 gen_helper_neon_qzip16(pd, pm);
d68a6f3a
PM
4828 break;
4829 case 2:
b13708bb 4830 gen_helper_neon_qzip32(pd, pm);
d68a6f3a
PM
4831 break;
4832 default:
4833 abort();
4834 }
4835 } else {
4836 switch (size) {
4837 case 0:
b13708bb 4838 gen_helper_neon_zip8(pd, pm);
d68a6f3a
PM
4839 break;
4840 case 1:
b13708bb 4841 gen_helper_neon_zip16(pd, pm);
d68a6f3a
PM
4842 break;
4843 default:
4844 abort();
4845 }
4846 }
b13708bb
RH
4847 tcg_temp_free_ptr(pd);
4848 tcg_temp_free_ptr(pm);
d68a6f3a 4849 return 0;
19457615
FN
4850}
4851
/* Byte-granularity transpose of the pair (t0, t1), updated in place:
 * new t0 = { t0.b2, t1.b2, t0.b0, t1.b0 } (even bytes of t0 shifted
 * up one lane, merged with the even bytes of t1);
 * new t1 = { t0.b3, t1.b3, t0.b1, t1.b1 } (odd bytes of t1 shifted
 * down one lane, merged with the odd bytes of t0).
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* Build the new t0 in rd: t0's even bytes moved to odd lanes,
     * OR'd with t1's even bytes kept in place.
     */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* Build the new t1 in place: t1's odd bytes moved to even lanes,
     * OR'd with t0's (still unmodified) odd bytes kept in place.
     */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4873
/* Halfword-granularity transpose of the pair (t0, t1), updated in
 * place: new t0 = { t0.low : t1.low }, new t1 = { t0.high : t1.high }.
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* Build the new t0 in rd: t0's low half shifted up, OR'd with
     * t1's low half.
     */
    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* Build the new t1 in place: t1's high half shifted down, OR'd
     * with t0's (still unmodified) high half.
     */
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4892
4893
/* Per-op parameters for the NEON load/store-multiple-structures
 * decode; indexed by the 'op' field extracted in disas_neon_ls_insn()
 * (which rejects op > 10).
 */
static struct {
    int nregs;      /* number of registers transferred */
    int interleave; /* element interleave factor (scales the stride) */
    int spacing;    /* register-number spacing between structures */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4911
4912/* Translate a NEON load/store element instruction. Return nonzero if the
4913 instruction is invalid. */
7dcc1f89 4914static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4915{
4916 int rd, rn, rm;
4917 int op;
4918 int nregs;
4919 int interleave;
84496233 4920 int spacing;
9ee6e8bb
PB
4921 int stride;
4922 int size;
4923 int reg;
4924 int pass;
4925 int load;
4926 int shift;
9ee6e8bb 4927 int n;
39d5492a
PM
4928 TCGv_i32 addr;
4929 TCGv_i32 tmp;
4930 TCGv_i32 tmp2;
84496233 4931 TCGv_i64 tmp64;
9ee6e8bb 4932
2c7ffc41
PM
4933 /* FIXME: this access check should not take precedence over UNDEF
4934 * for invalid encodings; we will generate incorrect syndrome information
4935 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4936 */
9dbbc748 4937 if (s->fp_excp_el) {
2c7ffc41 4938 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 4939 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
4940 return 0;
4941 }
4942
5df8bac1 4943 if (!s->vfp_enabled)
9ee6e8bb
PB
4944 return 1;
4945 VFP_DREG_D(rd, insn);
4946 rn = (insn >> 16) & 0xf;
4947 rm = insn & 0xf;
4948 load = (insn & (1 << 21)) != 0;
4949 if ((insn & (1 << 23)) == 0) {
4950 /* Load store all elements. */
4951 op = (insn >> 8) & 0xf;
4952 size = (insn >> 6) & 3;
84496233 4953 if (op > 10)
9ee6e8bb 4954 return 1;
f2dd89d0
PM
4955 /* Catch UNDEF cases for bad values of align field */
4956 switch (op & 0xc) {
4957 case 4:
4958 if (((insn >> 5) & 1) == 1) {
4959 return 1;
4960 }
4961 break;
4962 case 8:
4963 if (((insn >> 4) & 3) == 3) {
4964 return 1;
4965 }
4966 break;
4967 default:
4968 break;
4969 }
9ee6e8bb
PB
4970 nregs = neon_ls_element_type[op].nregs;
4971 interleave = neon_ls_element_type[op].interleave;
84496233
JR
4972 spacing = neon_ls_element_type[op].spacing;
4973 if (size == 3 && (interleave | spacing) != 1)
4974 return 1;
e318a60b 4975 addr = tcg_temp_new_i32();
dcc65026 4976 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4977 stride = (1 << size) * interleave;
4978 for (reg = 0; reg < nregs; reg++) {
4979 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
4980 load_reg_var(s, addr, rn);
4981 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 4982 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
4983 load_reg_var(s, addr, rn);
4984 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 4985 }
84496233 4986 if (size == 3) {
8ed1237d 4987 tmp64 = tcg_temp_new_i64();
84496233 4988 if (load) {
12dcc321 4989 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
84496233 4990 neon_store_reg64(tmp64, rd);
84496233 4991 } else {
84496233 4992 neon_load_reg64(tmp64, rd);
12dcc321 4993 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
84496233 4994 }
8ed1237d 4995 tcg_temp_free_i64(tmp64);
84496233
JR
4996 tcg_gen_addi_i32(addr, addr, stride);
4997 } else {
4998 for (pass = 0; pass < 2; pass++) {
4999 if (size == 2) {
5000 if (load) {
58ab8e96 5001 tmp = tcg_temp_new_i32();
12dcc321 5002 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
84496233
JR
5003 neon_store_reg(rd, pass, tmp);
5004 } else {
5005 tmp = neon_load_reg(rd, pass);
12dcc321 5006 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
58ab8e96 5007 tcg_temp_free_i32(tmp);
84496233 5008 }
1b2b1e54 5009 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
5010 } else if (size == 1) {
5011 if (load) {
58ab8e96 5012 tmp = tcg_temp_new_i32();
12dcc321 5013 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
84496233 5014 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96 5015 tmp2 = tcg_temp_new_i32();
12dcc321 5016 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
84496233 5017 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
5018 tcg_gen_shli_i32(tmp2, tmp2, 16);
5019 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5020 tcg_temp_free_i32(tmp2);
84496233
JR
5021 neon_store_reg(rd, pass, tmp);
5022 } else {
5023 tmp = neon_load_reg(rd, pass);
7d1b0095 5024 tmp2 = tcg_temp_new_i32();
84496233 5025 tcg_gen_shri_i32(tmp2, tmp, 16);
12dcc321 5026 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
58ab8e96 5027 tcg_temp_free_i32(tmp);
84496233 5028 tcg_gen_addi_i32(addr, addr, stride);
12dcc321 5029 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
58ab8e96 5030 tcg_temp_free_i32(tmp2);
1b2b1e54 5031 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 5032 }
84496233
JR
5033 } else /* size == 0 */ {
5034 if (load) {
f764718d 5035 tmp2 = NULL;
84496233 5036 for (n = 0; n < 4; n++) {
58ab8e96 5037 tmp = tcg_temp_new_i32();
12dcc321 5038 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
84496233
JR
5039 tcg_gen_addi_i32(addr, addr, stride);
5040 if (n == 0) {
5041 tmp2 = tmp;
5042 } else {
41ba8341
PB
5043 tcg_gen_shli_i32(tmp, tmp, n * 8);
5044 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 5045 tcg_temp_free_i32(tmp);
84496233 5046 }
9ee6e8bb 5047 }
84496233
JR
5048 neon_store_reg(rd, pass, tmp2);
5049 } else {
5050 tmp2 = neon_load_reg(rd, pass);
5051 for (n = 0; n < 4; n++) {
7d1b0095 5052 tmp = tcg_temp_new_i32();
84496233
JR
5053 if (n == 0) {
5054 tcg_gen_mov_i32(tmp, tmp2);
5055 } else {
5056 tcg_gen_shri_i32(tmp, tmp2, n * 8);
5057 }
12dcc321 5058 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
58ab8e96 5059 tcg_temp_free_i32(tmp);
84496233
JR
5060 tcg_gen_addi_i32(addr, addr, stride);
5061 }
7d1b0095 5062 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5063 }
5064 }
5065 }
5066 }
84496233 5067 rd += spacing;
9ee6e8bb 5068 }
e318a60b 5069 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5070 stride = nregs * 8;
5071 } else {
5072 size = (insn >> 10) & 3;
5073 if (size == 3) {
5074 /* Load single element to all lanes. */
8e18cde3
PM
5075 int a = (insn >> 4) & 1;
5076 if (!load) {
9ee6e8bb 5077 return 1;
8e18cde3 5078 }
9ee6e8bb
PB
5079 size = (insn >> 6) & 3;
5080 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
5081
5082 if (size == 3) {
5083 if (nregs != 4 || a == 0) {
9ee6e8bb 5084 return 1;
99c475ab 5085 }
8e18cde3
PM
5086 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5087 size = 2;
5088 }
5089 if (nregs == 1 && a == 1 && size == 0) {
5090 return 1;
5091 }
5092 if (nregs == 3 && a == 1) {
5093 return 1;
5094 }
e318a60b 5095 addr = tcg_temp_new_i32();
8e18cde3
PM
5096 load_reg_var(s, addr, rn);
5097 if (nregs == 1) {
5098 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
5099 tmp = gen_load_and_replicate(s, addr, size);
5100 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5101 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5102 if (insn & (1 << 5)) {
5103 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
5104 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
5105 }
5106 tcg_temp_free_i32(tmp);
5107 } else {
5108 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
5109 stride = (insn & (1 << 5)) ? 2 : 1;
5110 for (reg = 0; reg < nregs; reg++) {
5111 tmp = gen_load_and_replicate(s, addr, size);
5112 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5113 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5114 tcg_temp_free_i32(tmp);
5115 tcg_gen_addi_i32(addr, addr, 1 << size);
5116 rd += stride;
5117 }
9ee6e8bb 5118 }
e318a60b 5119 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5120 stride = (1 << size) * nregs;
5121 } else {
5122 /* Single element. */
93262b16 5123 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
5124 pass = (insn >> 7) & 1;
5125 switch (size) {
5126 case 0:
5127 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
5128 stride = 1;
5129 break;
5130 case 1:
5131 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
5132 stride = (insn & (1 << 5)) ? 2 : 1;
5133 break;
5134 case 2:
5135 shift = 0;
9ee6e8bb
PB
5136 stride = (insn & (1 << 6)) ? 2 : 1;
5137 break;
5138 default:
5139 abort();
5140 }
5141 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
5142 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5143 switch (nregs) {
5144 case 1:
5145 if (((idx & (1 << size)) != 0) ||
5146 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5147 return 1;
5148 }
5149 break;
5150 case 3:
5151 if ((idx & 1) != 0) {
5152 return 1;
5153 }
5154 /* fall through */
5155 case 2:
5156 if (size == 2 && (idx & 2) != 0) {
5157 return 1;
5158 }
5159 break;
5160 case 4:
5161 if ((size == 2) && ((idx & 3) == 3)) {
5162 return 1;
5163 }
5164 break;
5165 default:
5166 abort();
5167 }
5168 if ((rd + stride * (nregs - 1)) > 31) {
5169 /* Attempts to write off the end of the register file
5170 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5171 * the neon_load_reg() would write off the end of the array.
5172 */
5173 return 1;
5174 }
e318a60b 5175 addr = tcg_temp_new_i32();
dcc65026 5176 load_reg_var(s, addr, rn);
9ee6e8bb
PB
5177 for (reg = 0; reg < nregs; reg++) {
5178 if (load) {
58ab8e96 5179 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
5180 switch (size) {
5181 case 0:
12dcc321 5182 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5183 break;
5184 case 1:
12dcc321 5185 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5186 break;
5187 case 2:
12dcc321 5188 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5189 break;
a50f5b91
PB
5190 default: /* Avoid compiler warnings. */
5191 abort();
9ee6e8bb
PB
5192 }
5193 if (size != 2) {
8f8e3aa4 5194 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
5195 tcg_gen_deposit_i32(tmp, tmp2, tmp,
5196 shift, size ? 16 : 8);
7d1b0095 5197 tcg_temp_free_i32(tmp2);
9ee6e8bb 5198 }
8f8e3aa4 5199 neon_store_reg(rd, pass, tmp);
9ee6e8bb 5200 } else { /* Store */
8f8e3aa4
PB
5201 tmp = neon_load_reg(rd, pass);
5202 if (shift)
5203 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
5204 switch (size) {
5205 case 0:
12dcc321 5206 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5207 break;
5208 case 1:
12dcc321 5209 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5210 break;
5211 case 2:
12dcc321 5212 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5213 break;
99c475ab 5214 }
58ab8e96 5215 tcg_temp_free_i32(tmp);
99c475ab 5216 }
9ee6e8bb 5217 rd += stride;
1b2b1e54 5218 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 5219 }
e318a60b 5220 tcg_temp_free_i32(addr);
9ee6e8bb 5221 stride = nregs * (1 << size);
99c475ab 5222 }
9ee6e8bb
PB
5223 }
5224 if (rm != 15) {
39d5492a 5225 TCGv_i32 base;
b26eefb6
PB
5226
5227 base = load_reg(s, rn);
9ee6e8bb 5228 if (rm == 13) {
b26eefb6 5229 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 5230 } else {
39d5492a 5231 TCGv_i32 index;
b26eefb6
PB
5232 index = load_reg(s, rm);
5233 tcg_gen_add_i32(base, base, index);
7d1b0095 5234 tcg_temp_free_i32(index);
9ee6e8bb 5235 }
b26eefb6 5236 store_reg(s, rn, base);
9ee6e8bb
PB
5237 }
5238 return 0;
5239}
3b46e624 5240
8f8e3aa4 5241/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 5242static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
5243{
5244 tcg_gen_and_i32(t, t, c);
f669df27 5245 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
5246 tcg_gen_or_i32(dest, t, f);
5247}
5248
39d5492a 5249static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5250{
5251 switch (size) {
5252 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5253 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5254 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5255 default: abort();
5256 }
5257}
5258
39d5492a 5259static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5260{
5261 switch (size) {
02da0b2d
PM
5262 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5263 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5264 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5265 default: abort();
5266 }
5267}
5268
39d5492a 5269static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5270{
5271 switch (size) {
02da0b2d
PM
5272 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5273 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5274 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5275 default: abort();
5276 }
5277}
5278
39d5492a 5279static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5280{
5281 switch (size) {
02da0b2d
PM
5282 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5283 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5284 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5285 default: abort();
5286 }
5287}
5288
39d5492a 5289static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5290 int q, int u)
5291{
5292 if (q) {
5293 if (u) {
5294 switch (size) {
5295 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5296 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5297 default: abort();
5298 }
5299 } else {
5300 switch (size) {
5301 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5302 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5303 default: abort();
5304 }
5305 }
5306 } else {
5307 if (u) {
5308 switch (size) {
b408a9b0
CL
5309 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5310 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5311 default: abort();
5312 }
5313 } else {
5314 switch (size) {
5315 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5316 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5317 default: abort();
5318 }
5319 }
5320 }
5321}
5322
39d5492a 5323static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5324{
5325 if (u) {
5326 switch (size) {
5327 case 0: gen_helper_neon_widen_u8(dest, src); break;
5328 case 1: gen_helper_neon_widen_u16(dest, src); break;
5329 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5330 default: abort();
5331 }
5332 } else {
5333 switch (size) {
5334 case 0: gen_helper_neon_widen_s8(dest, src); break;
5335 case 1: gen_helper_neon_widen_s16(dest, src); break;
5336 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5337 default: abort();
5338 }
5339 }
7d1b0095 5340 tcg_temp_free_i32(src);
ad69471c
PB
5341}
5342
5343static inline void gen_neon_addl(int size)
5344{
5345 switch (size) {
5346 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5347 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5348 case 2: tcg_gen_add_i64(CPU_V001); break;
5349 default: abort();
5350 }
5351}
5352
5353static inline void gen_neon_subl(int size)
5354{
5355 switch (size) {
5356 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5357 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5358 case 2: tcg_gen_sub_i64(CPU_V001); break;
5359 default: abort();
5360 }
5361}
5362
a7812ae4 5363static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5364{
5365 switch (size) {
5366 case 0: gen_helper_neon_negl_u16(var, var); break;
5367 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5368 case 2:
5369 tcg_gen_neg_i64(var, var);
5370 break;
ad69471c
PB
5371 default: abort();
5372 }
5373}
5374
a7812ae4 5375static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5376{
5377 switch (size) {
02da0b2d
PM
5378 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5379 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5380 default: abort();
5381 }
5382}
5383
39d5492a
PM
5384static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5385 int size, int u)
ad69471c 5386{
a7812ae4 5387 TCGv_i64 tmp;
ad69471c
PB
5388
5389 switch ((size << 1) | u) {
5390 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5391 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5392 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5393 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5394 case 4:
5395 tmp = gen_muls_i64_i32(a, b);
5396 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5397 tcg_temp_free_i64(tmp);
ad69471c
PB
5398 break;
5399 case 5:
5400 tmp = gen_mulu_i64_i32(a, b);
5401 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5402 tcg_temp_free_i64(tmp);
ad69471c
PB
5403 break;
5404 default: abort();
5405 }
c6067f04
CL
5406
5407 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5408 Don't forget to clean them now. */
5409 if (size < 2) {
7d1b0095
PM
5410 tcg_temp_free_i32(a);
5411 tcg_temp_free_i32(b);
c6067f04 5412 }
ad69471c
PB
5413}
5414
39d5492a
PM
5415static void gen_neon_narrow_op(int op, int u, int size,
5416 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5417{
5418 if (op) {
5419 if (u) {
5420 gen_neon_unarrow_sats(size, dest, src);
5421 } else {
5422 gen_neon_narrow(size, dest, src);
5423 }
5424 } else {
5425 if (u) {
5426 gen_neon_narrow_satu(size, dest, src);
5427 } else {
5428 gen_neon_narrow_sats(size, dest, src);
5429 }
5430 }
5431}
5432
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Each entry has bit n set if the corresponding 3-reg-same op is valid
 * for size value n; the decoder UNDEFs the insn when
 * (neon_3r_sizes[op] & (1 << size)) == 0.  Unallocated op values have
 * no bits set and therefore always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6, /* 16- and 32-bit elements only */
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5504
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

/* Return true if this neon 2reg-misc op is float-to-float.
 * The checks rely on the numeric layout of the NEON_2RM_* values above.
 */
static int neon_2rm_is_float_op(int op)
{
    if (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F) {
        return 1;
    }
    if (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) {
        return 1;
    }
    if (op == NEON_2RM_VRINTM) {
        return 1;
    }
    if (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) {
        return 1;
    }
    return op >= NEON_2RM_VRECPE_F;
}

/* Return true if this neon 2reg-misc op is ARMv8 and up.
 * These are the VRINT* ops plus the VCVT{A,N,P,M}{U,S} block; the checks
 * rely on those ops being numbered contiguously (with VCVT_F16_F32 at 44
 * and VCVT_F32_F16 at 46 explicitly excluded by the == tests).
 */
static bool neon_2rm_is_v8_op(int op)
{
    if (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) {
        /* VRINTN, VRINTX, VRINTA, VRINTZ */
        return true;
    }
    if (op == NEON_2RM_VRINTM || op == NEON_2RM_VRINTP) {
        return true;
    }
    return op >= NEON_2RM_VCVTAU && op <= NEON_2RM_VCVTMS;
}
5605
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (0x1 = 8-bit only, 0x2 = 16-bit only, 0x4 = 32-bit only,
 * 0x7 = any element size.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5674
36a71934
RH
5675
5676/* Expand v8.1 simd helper. */
5677static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
5678 int q, int rd, int rn, int rm)
5679{
5680 if (arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
5681 int opr_sz = (1 + q) * 8;
5682 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
5683 vfp_reg_offset(1, rn),
5684 vfp_reg_offset(1, rm), cpu_env,
5685 opr_sz, opr_sz, 0, fn);
5686 return 0;
5687 }
5688 return 1;
5689}
5690
9ee6e8bb
PB
5691/* Translate a NEON data processing instruction. Return nonzero if the
5692 instruction is invalid.
ad69471c
PB
5693 We process data in a mixture of 32-bit and 64-bit chunks.
5694 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5695
7dcc1f89 5696static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5697{
5698 int op;
5699 int q;
5700 int rd, rn, rm;
5701 int size;
5702 int shift;
5703 int pass;
5704 int count;
5705 int pairwise;
5706 int u;
ca9a32e4 5707 uint32_t imm, mask;
39d5492a 5708 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
1a66ac61 5709 TCGv_ptr ptr1, ptr2, ptr3;
a7812ae4 5710 TCGv_i64 tmp64;
9ee6e8bb 5711
2c7ffc41
PM
5712 /* FIXME: this access check should not take precedence over UNDEF
5713 * for invalid encodings; we will generate incorrect syndrome information
5714 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5715 */
9dbbc748 5716 if (s->fp_excp_el) {
2c7ffc41 5717 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5718 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5719 return 0;
5720 }
5721
5df8bac1 5722 if (!s->vfp_enabled)
9ee6e8bb
PB
5723 return 1;
5724 q = (insn & (1 << 6)) != 0;
5725 u = (insn >> 24) & 1;
5726 VFP_DREG_D(rd, insn);
5727 VFP_DREG_N(rn, insn);
5728 VFP_DREG_M(rm, insn);
5729 size = (insn >> 20) & 3;
5730 if ((insn & (1 << 23)) == 0) {
5731 /* Three register same length. */
5732 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5733 /* Catch invalid op and bad size combinations: UNDEF */
5734 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5735 return 1;
5736 }
25f84f79
PM
5737 /* All insns of this form UNDEF for either this condition or the
5738 * superset of cases "Q==1"; we catch the latter later.
5739 */
5740 if (q && ((rd | rn | rm) & 1)) {
5741 return 1;
5742 }
36a71934
RH
5743 switch (op) {
5744 case NEON_3R_SHA:
5745 /* The SHA-1/SHA-256 3-register instructions require special
5746 * treatment here, as their size field is overloaded as an
5747 * op type selector, and they all consume their input in a
5748 * single pass.
5749 */
f1ecb913
AB
5750 if (!q) {
5751 return 1;
5752 }
5753 if (!u) { /* SHA-1 */
d614a513 5754 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5755 return 1;
5756 }
1a66ac61
RH
5757 ptr1 = vfp_reg_ptr(true, rd);
5758 ptr2 = vfp_reg_ptr(true, rn);
5759 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913 5760 tmp4 = tcg_const_i32(size);
1a66ac61 5761 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
f1ecb913
AB
5762 tcg_temp_free_i32(tmp4);
5763 } else { /* SHA-256 */
d614a513 5764 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5765 return 1;
5766 }
1a66ac61
RH
5767 ptr1 = vfp_reg_ptr(true, rd);
5768 ptr2 = vfp_reg_ptr(true, rn);
5769 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913
AB
5770 switch (size) {
5771 case 0:
1a66ac61 5772 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
f1ecb913
AB
5773 break;
5774 case 1:
1a66ac61 5775 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
f1ecb913
AB
5776 break;
5777 case 2:
1a66ac61 5778 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
f1ecb913
AB
5779 break;
5780 }
5781 }
1a66ac61
RH
5782 tcg_temp_free_ptr(ptr1);
5783 tcg_temp_free_ptr(ptr2);
5784 tcg_temp_free_ptr(ptr3);
f1ecb913 5785 return 0;
36a71934
RH
5786
5787 case NEON_3R_VPADD_VQRDMLAH:
5788 if (!u) {
5789 break; /* VPADD */
5790 }
5791 /* VQRDMLAH */
5792 switch (size) {
5793 case 1:
5794 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
5795 q, rd, rn, rm);
5796 case 2:
5797 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
5798 q, rd, rn, rm);
5799 }
5800 return 1;
5801
5802 case NEON_3R_VFM_VQRDMLSH:
5803 if (!u) {
5804 /* VFM, VFMS */
5805 if (size == 1) {
5806 return 1;
5807 }
5808 break;
5809 }
5810 /* VQRDMLSH */
5811 switch (size) {
5812 case 1:
5813 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
5814 q, rd, rn, rm);
5815 case 2:
5816 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
5817 q, rd, rn, rm);
5818 }
5819 return 1;
f1ecb913 5820 }
62698be3
PM
5821 if (size == 3 && op != NEON_3R_LOGIC) {
5822 /* 64-bit element instructions. */
9ee6e8bb 5823 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5824 neon_load_reg64(cpu_V0, rn + pass);
5825 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5826 switch (op) {
62698be3 5827 case NEON_3R_VQADD:
9ee6e8bb 5828 if (u) {
02da0b2d
PM
5829 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5830 cpu_V0, cpu_V1);
2c0262af 5831 } else {
02da0b2d
PM
5832 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5833 cpu_V0, cpu_V1);
2c0262af 5834 }
9ee6e8bb 5835 break;
62698be3 5836 case NEON_3R_VQSUB:
9ee6e8bb 5837 if (u) {
02da0b2d
PM
5838 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5839 cpu_V0, cpu_V1);
ad69471c 5840 } else {
02da0b2d
PM
5841 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5842 cpu_V0, cpu_V1);
ad69471c
PB
5843 }
5844 break;
62698be3 5845 case NEON_3R_VSHL:
ad69471c
PB
5846 if (u) {
5847 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5848 } else {
5849 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5850 }
5851 break;
62698be3 5852 case NEON_3R_VQSHL:
ad69471c 5853 if (u) {
02da0b2d
PM
5854 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5855 cpu_V1, cpu_V0);
ad69471c 5856 } else {
02da0b2d
PM
5857 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5858 cpu_V1, cpu_V0);
ad69471c
PB
5859 }
5860 break;
62698be3 5861 case NEON_3R_VRSHL:
ad69471c
PB
5862 if (u) {
5863 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5864 } else {
ad69471c
PB
5865 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5866 }
5867 break;
62698be3 5868 case NEON_3R_VQRSHL:
ad69471c 5869 if (u) {
02da0b2d
PM
5870 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5871 cpu_V1, cpu_V0);
ad69471c 5872 } else {
02da0b2d
PM
5873 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5874 cpu_V1, cpu_V0);
1e8d4eec 5875 }
9ee6e8bb 5876 break;
62698be3 5877 case NEON_3R_VADD_VSUB:
9ee6e8bb 5878 if (u) {
ad69471c 5879 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5880 } else {
ad69471c 5881 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5882 }
5883 break;
5884 default:
5885 abort();
2c0262af 5886 }
ad69471c 5887 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5888 }
9ee6e8bb 5889 return 0;
2c0262af 5890 }
25f84f79 5891 pairwise = 0;
9ee6e8bb 5892 switch (op) {
62698be3
PM
5893 case NEON_3R_VSHL:
5894 case NEON_3R_VQSHL:
5895 case NEON_3R_VRSHL:
5896 case NEON_3R_VQRSHL:
9ee6e8bb 5897 {
ad69471c
PB
5898 int rtmp;
5899 /* Shift instruction operands are reversed. */
5900 rtmp = rn;
9ee6e8bb 5901 rn = rm;
ad69471c 5902 rm = rtmp;
9ee6e8bb 5903 }
2c0262af 5904 break;
36a71934 5905 case NEON_3R_VPADD_VQRDMLAH:
62698be3
PM
5906 case NEON_3R_VPMAX:
5907 case NEON_3R_VPMIN:
9ee6e8bb 5908 pairwise = 1;
2c0262af 5909 break;
25f84f79
PM
5910 case NEON_3R_FLOAT_ARITH:
5911 pairwise = (u && size < 2); /* if VPADD (float) */
5912 break;
5913 case NEON_3R_FLOAT_MINMAX:
5914 pairwise = u; /* if VPMIN/VPMAX (float) */
5915 break;
5916 case NEON_3R_FLOAT_CMP:
5917 if (!u && size) {
5918 /* no encoding for U=0 C=1x */
5919 return 1;
5920 }
5921 break;
5922 case NEON_3R_FLOAT_ACMP:
5923 if (!u) {
5924 return 1;
5925 }
5926 break;
505935fc
WN
5927 case NEON_3R_FLOAT_MISC:
5928 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5929 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5930 return 1;
5931 }
2c0262af 5932 break;
25f84f79
PM
5933 case NEON_3R_VMUL:
5934 if (u && (size != 0)) {
5935 /* UNDEF on invalid size for polynomial subcase */
5936 return 1;
5937 }
2c0262af 5938 break;
36a71934
RH
5939 case NEON_3R_VFM_VQRDMLSH:
5940 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
5941 return 1;
5942 }
5943 break;
9ee6e8bb 5944 default:
2c0262af 5945 break;
9ee6e8bb 5946 }
dd8fbd78 5947
25f84f79
PM
5948 if (pairwise && q) {
5949 /* All the pairwise insns UNDEF if Q is set */
5950 return 1;
5951 }
5952
9ee6e8bb
PB
5953 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5954
5955 if (pairwise) {
5956 /* Pairwise. */
a5a14945
JR
5957 if (pass < 1) {
5958 tmp = neon_load_reg(rn, 0);
5959 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5960 } else {
a5a14945
JR
5961 tmp = neon_load_reg(rm, 0);
5962 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5963 }
5964 } else {
5965 /* Elementwise. */
dd8fbd78
FN
5966 tmp = neon_load_reg(rn, pass);
5967 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5968 }
5969 switch (op) {
62698be3 5970 case NEON_3R_VHADD:
9ee6e8bb
PB
5971 GEN_NEON_INTEGER_OP(hadd);
5972 break;
62698be3 5973 case NEON_3R_VQADD:
02da0b2d 5974 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5975 break;
62698be3 5976 case NEON_3R_VRHADD:
9ee6e8bb 5977 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5978 break;
62698be3 5979 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5980 switch ((u << 2) | size) {
5981 case 0: /* VAND */
dd8fbd78 5982 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5983 break;
5984 case 1: /* BIC */
f669df27 5985 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5986 break;
5987 case 2: /* VORR */
dd8fbd78 5988 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5989 break;
5990 case 3: /* VORN */
f669df27 5991 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5992 break;
5993 case 4: /* VEOR */
dd8fbd78 5994 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5995 break;
5996 case 5: /* VBSL */
dd8fbd78
FN
5997 tmp3 = neon_load_reg(rd, pass);
5998 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5999 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
6000 break;
6001 case 6: /* VBIT */
dd8fbd78
FN
6002 tmp3 = neon_load_reg(rd, pass);
6003 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 6004 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
6005 break;
6006 case 7: /* VBIF */
dd8fbd78
FN
6007 tmp3 = neon_load_reg(rd, pass);
6008 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 6009 tcg_temp_free_i32(tmp3);
9ee6e8bb 6010 break;
2c0262af
FB
6011 }
6012 break;
62698be3 6013 case NEON_3R_VHSUB:
9ee6e8bb
PB
6014 GEN_NEON_INTEGER_OP(hsub);
6015 break;
62698be3 6016 case NEON_3R_VQSUB:
02da0b2d 6017 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 6018 break;
62698be3 6019 case NEON_3R_VCGT:
9ee6e8bb
PB
6020 GEN_NEON_INTEGER_OP(cgt);
6021 break;
62698be3 6022 case NEON_3R_VCGE:
9ee6e8bb
PB
6023 GEN_NEON_INTEGER_OP(cge);
6024 break;
62698be3 6025 case NEON_3R_VSHL:
ad69471c 6026 GEN_NEON_INTEGER_OP(shl);
2c0262af 6027 break;
62698be3 6028 case NEON_3R_VQSHL:
02da0b2d 6029 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 6030 break;
62698be3 6031 case NEON_3R_VRSHL:
ad69471c 6032 GEN_NEON_INTEGER_OP(rshl);
2c0262af 6033 break;
62698be3 6034 case NEON_3R_VQRSHL:
02da0b2d 6035 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 6036 break;
62698be3 6037 case NEON_3R_VMAX:
9ee6e8bb
PB
6038 GEN_NEON_INTEGER_OP(max);
6039 break;
62698be3 6040 case NEON_3R_VMIN:
9ee6e8bb
PB
6041 GEN_NEON_INTEGER_OP(min);
6042 break;
62698be3 6043 case NEON_3R_VABD:
9ee6e8bb
PB
6044 GEN_NEON_INTEGER_OP(abd);
6045 break;
62698be3 6046 case NEON_3R_VABA:
9ee6e8bb 6047 GEN_NEON_INTEGER_OP(abd);
7d1b0095 6048 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
6049 tmp2 = neon_load_reg(rd, pass);
6050 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 6051 break;
62698be3 6052 case NEON_3R_VADD_VSUB:
9ee6e8bb 6053 if (!u) { /* VADD */
62698be3 6054 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6055 } else { /* VSUB */
6056 switch (size) {
dd8fbd78
FN
6057 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
6058 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
6059 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 6060 default: abort();
9ee6e8bb
PB
6061 }
6062 }
6063 break;
62698be3 6064 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
6065 if (!u) { /* VTST */
6066 switch (size) {
dd8fbd78
FN
6067 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
6068 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
6069 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 6070 default: abort();
9ee6e8bb
PB
6071 }
6072 } else { /* VCEQ */
6073 switch (size) {
dd8fbd78
FN
6074 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6075 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6076 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 6077 default: abort();
9ee6e8bb
PB
6078 }
6079 }
6080 break;
62698be3 6081 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 6082 switch (size) {
dd8fbd78
FN
6083 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6084 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6085 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 6086 default: abort();
9ee6e8bb 6087 }
7d1b0095 6088 tcg_temp_free_i32(tmp2);
dd8fbd78 6089 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6090 if (u) { /* VMLS */
dd8fbd78 6091 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 6092 } else { /* VMLA */
dd8fbd78 6093 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6094 }
6095 break;
62698be3 6096 case NEON_3R_VMUL:
9ee6e8bb 6097 if (u) { /* polynomial */
dd8fbd78 6098 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
6099 } else { /* Integer */
6100 switch (size) {
dd8fbd78
FN
6101 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6102 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6103 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 6104 default: abort();
9ee6e8bb
PB
6105 }
6106 }
6107 break;
62698be3 6108 case NEON_3R_VPMAX:
9ee6e8bb
PB
6109 GEN_NEON_INTEGER_OP(pmax);
6110 break;
62698be3 6111 case NEON_3R_VPMIN:
9ee6e8bb
PB
6112 GEN_NEON_INTEGER_OP(pmin);
6113 break;
62698be3 6114 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
6115 if (!u) { /* VQDMULH */
6116 switch (size) {
02da0b2d
PM
6117 case 1:
6118 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6119 break;
6120 case 2:
6121 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6122 break;
62698be3 6123 default: abort();
9ee6e8bb 6124 }
62698be3 6125 } else { /* VQRDMULH */
9ee6e8bb 6126 switch (size) {
02da0b2d
PM
6127 case 1:
6128 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6129 break;
6130 case 2:
6131 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6132 break;
62698be3 6133 default: abort();
9ee6e8bb
PB
6134 }
6135 }
6136 break;
36a71934 6137 case NEON_3R_VPADD_VQRDMLAH:
9ee6e8bb 6138 switch (size) {
dd8fbd78
FN
6139 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
6140 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
6141 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 6142 default: abort();
9ee6e8bb
PB
6143 }
6144 break;
62698be3 6145 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
6146 {
6147 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
6148 switch ((u << 2) | size) {
6149 case 0: /* VADD */
aa47cfdd
PM
6150 case 4: /* VPADD */
6151 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6152 break;
6153 case 2: /* VSUB */
aa47cfdd 6154 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6155 break;
6156 case 6: /* VABD */
aa47cfdd 6157 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6158 break;
6159 default:
62698be3 6160 abort();
9ee6e8bb 6161 }
aa47cfdd 6162 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6163 break;
aa47cfdd 6164 }
62698be3 6165 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
6166 {
6167 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6168 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6169 if (!u) {
7d1b0095 6170 tcg_temp_free_i32(tmp2);
dd8fbd78 6171 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6172 if (size == 0) {
aa47cfdd 6173 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6174 } else {
aa47cfdd 6175 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
6176 }
6177 }
aa47cfdd 6178 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6179 break;
aa47cfdd 6180 }
62698be3 6181 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
6182 {
6183 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 6184 if (!u) {
aa47cfdd 6185 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 6186 } else {
aa47cfdd
PM
6187 if (size == 0) {
6188 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6189 } else {
6190 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6191 }
b5ff1b31 6192 }
aa47cfdd 6193 tcg_temp_free_ptr(fpstatus);
2c0262af 6194 break;
aa47cfdd 6195 }
62698be3 6196 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
6197 {
6198 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6199 if (size == 0) {
6200 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6201 } else {
6202 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6203 }
6204 tcg_temp_free_ptr(fpstatus);
2c0262af 6205 break;
aa47cfdd 6206 }
62698be3 6207 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
6208 {
6209 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6210 if (size == 0) {
f71a2ae5 6211 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 6212 } else {
f71a2ae5 6213 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
6214 }
6215 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6216 break;
aa47cfdd 6217 }
505935fc
WN
6218 case NEON_3R_FLOAT_MISC:
6219 if (u) {
6220 /* VMAXNM/VMINNM */
6221 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6222 if (size == 0) {
f71a2ae5 6223 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 6224 } else {
f71a2ae5 6225 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
6226 }
6227 tcg_temp_free_ptr(fpstatus);
6228 } else {
6229 if (size == 0) {
6230 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6231 } else {
6232 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6233 }
6234 }
2c0262af 6235 break;
36a71934 6236 case NEON_3R_VFM_VQRDMLSH:
da97f52c
PM
6237 {
6238 /* VFMA, VFMS: fused multiply-add */
6239 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6240 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6241 if (size) {
6242 /* VFMS */
6243 gen_helper_vfp_negs(tmp, tmp);
6244 }
6245 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6246 tcg_temp_free_i32(tmp3);
6247 tcg_temp_free_ptr(fpstatus);
6248 break;
6249 }
9ee6e8bb
PB
6250 default:
6251 abort();
2c0262af 6252 }
7d1b0095 6253 tcg_temp_free_i32(tmp2);
dd8fbd78 6254
9ee6e8bb
PB
6255 /* Save the result. For elementwise operations we can put it
6256 straight into the destination register. For pairwise operations
6257 we have to be careful to avoid clobbering the source operands. */
6258 if (pairwise && rd == rm) {
dd8fbd78 6259 neon_store_scratch(pass, tmp);
9ee6e8bb 6260 } else {
dd8fbd78 6261 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6262 }
6263
6264 } /* for pass */
6265 if (pairwise && rd == rm) {
6266 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
6267 tmp = neon_load_scratch(pass);
6268 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6269 }
6270 }
ad69471c 6271 /* End of 3 register same size operations. */
9ee6e8bb
PB
6272 } else if (insn & (1 << 4)) {
6273 if ((insn & 0x00380080) != 0) {
6274 /* Two registers and shift. */
6275 op = (insn >> 8) & 0xf;
6276 if (insn & (1 << 7)) {
cc13115b
PM
6277 /* 64-bit shift. */
6278 if (op > 7) {
6279 return 1;
6280 }
9ee6e8bb
PB
6281 size = 3;
6282 } else {
6283 size = 2;
6284 while ((insn & (1 << (size + 19))) == 0)
6285 size--;
6286 }
6287 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6288 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6289 by immediate using the variable shift operations. */
6290 if (op < 8) {
6291 /* Shift by immediate:
6292 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6293 if (q && ((rd | rm) & 1)) {
6294 return 1;
6295 }
6296 if (!u && (op == 4 || op == 6)) {
6297 return 1;
6298 }
9ee6e8bb
PB
6299 /* Right shifts are encoded as N - shift, where N is the
6300 element size in bits. */
6301 if (op <= 4)
6302 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6303 if (size == 3) {
6304 count = q + 1;
6305 } else {
6306 count = q ? 4: 2;
6307 }
6308 switch (size) {
6309 case 0:
6310 imm = (uint8_t) shift;
6311 imm |= imm << 8;
6312 imm |= imm << 16;
6313 break;
6314 case 1:
6315 imm = (uint16_t) shift;
6316 imm |= imm << 16;
6317 break;
6318 case 2:
6319 case 3:
6320 imm = shift;
6321 break;
6322 default:
6323 abort();
6324 }
6325
6326 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6327 if (size == 3) {
6328 neon_load_reg64(cpu_V0, rm + pass);
6329 tcg_gen_movi_i64(cpu_V1, imm);
6330 switch (op) {
6331 case 0: /* VSHR */
6332 case 1: /* VSRA */
6333 if (u)
6334 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6335 else
ad69471c 6336 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6337 break;
ad69471c
PB
6338 case 2: /* VRSHR */
6339 case 3: /* VRSRA */
6340 if (u)
6341 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6342 else
ad69471c 6343 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6344 break;
ad69471c 6345 case 4: /* VSRI */
ad69471c
PB
6346 case 5: /* VSHL, VSLI */
6347 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6348 break;
0322b26e 6349 case 6: /* VQSHLU */
02da0b2d
PM
6350 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6351 cpu_V0, cpu_V1);
ad69471c 6352 break;
0322b26e
PM
6353 case 7: /* VQSHL */
6354 if (u) {
02da0b2d 6355 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6356 cpu_V0, cpu_V1);
6357 } else {
02da0b2d 6358 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6359 cpu_V0, cpu_V1);
6360 }
9ee6e8bb 6361 break;
9ee6e8bb 6362 }
ad69471c
PB
6363 if (op == 1 || op == 3) {
6364 /* Accumulate. */
5371cb81 6365 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6366 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6367 } else if (op == 4 || (op == 5 && u)) {
6368 /* Insert */
923e6509
CL
6369 neon_load_reg64(cpu_V1, rd + pass);
6370 uint64_t mask;
6371 if (shift < -63 || shift > 63) {
6372 mask = 0;
6373 } else {
6374 if (op == 4) {
6375 mask = 0xffffffffffffffffull >> -shift;
6376 } else {
6377 mask = 0xffffffffffffffffull << shift;
6378 }
6379 }
6380 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6381 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6382 }
6383 neon_store_reg64(cpu_V0, rd + pass);
6384 } else { /* size < 3 */
6385 /* Operands in T0 and T1. */
dd8fbd78 6386 tmp = neon_load_reg(rm, pass);
7d1b0095 6387 tmp2 = tcg_temp_new_i32();
dd8fbd78 6388 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6389 switch (op) {
6390 case 0: /* VSHR */
6391 case 1: /* VSRA */
6392 GEN_NEON_INTEGER_OP(shl);
6393 break;
6394 case 2: /* VRSHR */
6395 case 3: /* VRSRA */
6396 GEN_NEON_INTEGER_OP(rshl);
6397 break;
6398 case 4: /* VSRI */
ad69471c
PB
6399 case 5: /* VSHL, VSLI */
6400 switch (size) {
dd8fbd78
FN
6401 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6402 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6403 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6404 default: abort();
ad69471c
PB
6405 }
6406 break;
0322b26e 6407 case 6: /* VQSHLU */
ad69471c 6408 switch (size) {
0322b26e 6409 case 0:
02da0b2d
PM
6410 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6411 tmp, tmp2);
0322b26e
PM
6412 break;
6413 case 1:
02da0b2d
PM
6414 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6415 tmp, tmp2);
0322b26e
PM
6416 break;
6417 case 2:
02da0b2d
PM
6418 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6419 tmp, tmp2);
0322b26e
PM
6420 break;
6421 default:
cc13115b 6422 abort();
ad69471c
PB
6423 }
6424 break;
0322b26e 6425 case 7: /* VQSHL */
02da0b2d 6426 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6427 break;
ad69471c 6428 }
7d1b0095 6429 tcg_temp_free_i32(tmp2);
ad69471c
PB
6430
6431 if (op == 1 || op == 3) {
6432 /* Accumulate. */
dd8fbd78 6433 tmp2 = neon_load_reg(rd, pass);
5371cb81 6434 gen_neon_add(size, tmp, tmp2);
7d1b0095 6435 tcg_temp_free_i32(tmp2);
ad69471c
PB
6436 } else if (op == 4 || (op == 5 && u)) {
6437 /* Insert */
6438 switch (size) {
6439 case 0:
6440 if (op == 4)
ca9a32e4 6441 mask = 0xff >> -shift;
ad69471c 6442 else
ca9a32e4
JR
6443 mask = (uint8_t)(0xff << shift);
6444 mask |= mask << 8;
6445 mask |= mask << 16;
ad69471c
PB
6446 break;
6447 case 1:
6448 if (op == 4)
ca9a32e4 6449 mask = 0xffff >> -shift;
ad69471c 6450 else
ca9a32e4
JR
6451 mask = (uint16_t)(0xffff << shift);
6452 mask |= mask << 16;
ad69471c
PB
6453 break;
6454 case 2:
ca9a32e4
JR
6455 if (shift < -31 || shift > 31) {
6456 mask = 0;
6457 } else {
6458 if (op == 4)
6459 mask = 0xffffffffu >> -shift;
6460 else
6461 mask = 0xffffffffu << shift;
6462 }
ad69471c
PB
6463 break;
6464 default:
6465 abort();
6466 }
dd8fbd78 6467 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6468 tcg_gen_andi_i32(tmp, tmp, mask);
6469 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6470 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6471 tcg_temp_free_i32(tmp2);
ad69471c 6472 }
dd8fbd78 6473 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6474 }
6475 } /* for pass */
6476 } else if (op < 10) {
ad69471c 6477 /* Shift by immediate and narrow:
9ee6e8bb 6478 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6479 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6480 if (rm & 1) {
6481 return 1;
6482 }
9ee6e8bb
PB
6483 shift = shift - (1 << (size + 3));
6484 size++;
92cdfaeb 6485 if (size == 3) {
a7812ae4 6486 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6487 neon_load_reg64(cpu_V0, rm);
6488 neon_load_reg64(cpu_V1, rm + 1);
6489 for (pass = 0; pass < 2; pass++) {
6490 TCGv_i64 in;
6491 if (pass == 0) {
6492 in = cpu_V0;
6493 } else {
6494 in = cpu_V1;
6495 }
ad69471c 6496 if (q) {
0b36f4cd 6497 if (input_unsigned) {
92cdfaeb 6498 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6499 } else {
92cdfaeb 6500 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6501 }
ad69471c 6502 } else {
0b36f4cd 6503 if (input_unsigned) {
92cdfaeb 6504 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6505 } else {
92cdfaeb 6506 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6507 }
ad69471c 6508 }
7d1b0095 6509 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6510 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6511 neon_store_reg(rd, pass, tmp);
6512 } /* for pass */
6513 tcg_temp_free_i64(tmp64);
6514 } else {
6515 if (size == 1) {
6516 imm = (uint16_t)shift;
6517 imm |= imm << 16;
2c0262af 6518 } else {
92cdfaeb
PM
6519 /* size == 2 */
6520 imm = (uint32_t)shift;
6521 }
6522 tmp2 = tcg_const_i32(imm);
6523 tmp4 = neon_load_reg(rm + 1, 0);
6524 tmp5 = neon_load_reg(rm + 1, 1);
6525 for (pass = 0; pass < 2; pass++) {
6526 if (pass == 0) {
6527 tmp = neon_load_reg(rm, 0);
6528 } else {
6529 tmp = tmp4;
6530 }
0b36f4cd
CL
6531 gen_neon_shift_narrow(size, tmp, tmp2, q,
6532 input_unsigned);
92cdfaeb
PM
6533 if (pass == 0) {
6534 tmp3 = neon_load_reg(rm, 1);
6535 } else {
6536 tmp3 = tmp5;
6537 }
0b36f4cd
CL
6538 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6539 input_unsigned);
36aa55dc 6540 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6541 tcg_temp_free_i32(tmp);
6542 tcg_temp_free_i32(tmp3);
6543 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6544 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6545 neon_store_reg(rd, pass, tmp);
6546 } /* for pass */
c6067f04 6547 tcg_temp_free_i32(tmp2);
b75263d6 6548 }
9ee6e8bb 6549 } else if (op == 10) {
cc13115b
PM
6550 /* VSHLL, VMOVL */
6551 if (q || (rd & 1)) {
9ee6e8bb 6552 return 1;
cc13115b 6553 }
ad69471c
PB
6554 tmp = neon_load_reg(rm, 0);
6555 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6556 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6557 if (pass == 1)
6558 tmp = tmp2;
6559
6560 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6561
9ee6e8bb
PB
6562 if (shift != 0) {
6563 /* The shift is less than the width of the source
ad69471c
PB
6564 type, so we can just shift the whole register. */
6565 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6566 /* Widen the result of shift: we need to clear
6567 * the potential overflow bits resulting from
6568 * left bits of the narrow input appearing as
6569 * right bits of left the neighbour narrow
6570 * input. */
ad69471c
PB
6571 if (size < 2 || !u) {
6572 uint64_t imm64;
6573 if (size == 0) {
6574 imm = (0xffu >> (8 - shift));
6575 imm |= imm << 16;
acdf01ef 6576 } else if (size == 1) {
ad69471c 6577 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6578 } else {
6579 /* size == 2 */
6580 imm = 0xffffffff >> (32 - shift);
6581 }
6582 if (size < 2) {
6583 imm64 = imm | (((uint64_t)imm) << 32);
6584 } else {
6585 imm64 = imm;
9ee6e8bb 6586 }
acdf01ef 6587 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6588 }
6589 }
ad69471c 6590 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6591 }
f73534a5 6592 } else if (op >= 14) {
9ee6e8bb 6593 /* VCVT fixed-point. */
cc13115b
PM
6594 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6595 return 1;
6596 }
f73534a5
PM
6597 /* We have already masked out the must-be-1 top bit of imm6,
6598 * hence this 32-shift where the ARM ARM has 64-imm6.
6599 */
6600 shift = 32 - shift;
9ee6e8bb 6601 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6602 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6603 if (!(op & 1)) {
9ee6e8bb 6604 if (u)
5500b06c 6605 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6606 else
5500b06c 6607 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6608 } else {
6609 if (u)
5500b06c 6610 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6611 else
5500b06c 6612 gen_vfp_tosl(0, shift, 1);
2c0262af 6613 }
4373f3ce 6614 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6615 }
6616 } else {
9ee6e8bb
PB
6617 return 1;
6618 }
6619 } else { /* (insn & 0x00380080) == 0 */
6620 int invert;
7d80fee5
PM
6621 if (q && (rd & 1)) {
6622 return 1;
6623 }
9ee6e8bb
PB
6624
6625 op = (insn >> 8) & 0xf;
6626 /* One register and immediate. */
6627 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6628 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6629 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6630 * We choose to not special-case this and will behave as if a
6631 * valid constant encoding of 0 had been given.
6632 */
9ee6e8bb
PB
6633 switch (op) {
6634 case 0: case 1:
6635 /* no-op */
6636 break;
6637 case 2: case 3:
6638 imm <<= 8;
6639 break;
6640 case 4: case 5:
6641 imm <<= 16;
6642 break;
6643 case 6: case 7:
6644 imm <<= 24;
6645 break;
6646 case 8: case 9:
6647 imm |= imm << 16;
6648 break;
6649 case 10: case 11:
6650 imm = (imm << 8) | (imm << 24);
6651 break;
6652 case 12:
8e31209e 6653 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6654 break;
6655 case 13:
6656 imm = (imm << 16) | 0xffff;
6657 break;
6658 case 14:
6659 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6660 if (invert)
6661 imm = ~imm;
6662 break;
6663 case 15:
7d80fee5
PM
6664 if (invert) {
6665 return 1;
6666 }
9ee6e8bb
PB
6667 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6668 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6669 break;
6670 }
6671 if (invert)
6672 imm = ~imm;
6673
9ee6e8bb
PB
6674 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6675 if (op & 1 && op < 12) {
ad69471c 6676 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6677 if (invert) {
6678 /* The immediate value has already been inverted, so
6679 BIC becomes AND. */
ad69471c 6680 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6681 } else {
ad69471c 6682 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6683 }
9ee6e8bb 6684 } else {
ad69471c 6685 /* VMOV, VMVN. */
7d1b0095 6686 tmp = tcg_temp_new_i32();
9ee6e8bb 6687 if (op == 14 && invert) {
a5a14945 6688 int n;
ad69471c
PB
6689 uint32_t val;
6690 val = 0;
9ee6e8bb
PB
6691 for (n = 0; n < 4; n++) {
6692 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6693 val |= 0xff << (n * 8);
9ee6e8bb 6694 }
ad69471c
PB
6695 tcg_gen_movi_i32(tmp, val);
6696 } else {
6697 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6698 }
9ee6e8bb 6699 }
ad69471c 6700 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6701 }
6702 }
e4b3861d 6703 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6704 if (size != 3) {
6705 op = (insn >> 8) & 0xf;
6706 if ((insn & (1 << 6)) == 0) {
6707 /* Three registers of different lengths. */
6708 int src1_wide;
6709 int src2_wide;
6710 int prewiden;
526d0096
PM
6711 /* undefreq: bit 0 : UNDEF if size == 0
6712 * bit 1 : UNDEF if size == 1
6713 * bit 2 : UNDEF if size == 2
6714 * bit 3 : UNDEF if U == 1
6715 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6716 */
6717 int undefreq;
6718 /* prewiden, src1_wide, src2_wide, undefreq */
6719 static const int neon_3reg_wide[16][4] = {
6720 {1, 0, 0, 0}, /* VADDL */
6721 {1, 1, 0, 0}, /* VADDW */
6722 {1, 0, 0, 0}, /* VSUBL */
6723 {1, 1, 0, 0}, /* VSUBW */
6724 {0, 1, 1, 0}, /* VADDHN */
6725 {0, 0, 0, 0}, /* VABAL */
6726 {0, 1, 1, 0}, /* VSUBHN */
6727 {0, 0, 0, 0}, /* VABDL */
6728 {0, 0, 0, 0}, /* VMLAL */
526d0096 6729 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6730 {0, 0, 0, 0}, /* VMLSL */
526d0096 6731 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6732 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6733 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6734 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6735 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6736 };
6737
6738 prewiden = neon_3reg_wide[op][0];
6739 src1_wide = neon_3reg_wide[op][1];
6740 src2_wide = neon_3reg_wide[op][2];
695272dc 6741 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6742
526d0096
PM
6743 if ((undefreq & (1 << size)) ||
6744 ((undefreq & 8) && u)) {
695272dc
PM
6745 return 1;
6746 }
6747 if ((src1_wide && (rn & 1)) ||
6748 (src2_wide && (rm & 1)) ||
6749 (!src2_wide && (rd & 1))) {
ad69471c 6750 return 1;
695272dc 6751 }
ad69471c 6752
4e624eda
PM
6753 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6754 * outside the loop below as it only performs a single pass.
6755 */
6756 if (op == 14 && size == 2) {
6757 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6758
d614a513 6759 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6760 return 1;
6761 }
6762 tcg_rn = tcg_temp_new_i64();
6763 tcg_rm = tcg_temp_new_i64();
6764 tcg_rd = tcg_temp_new_i64();
6765 neon_load_reg64(tcg_rn, rn);
6766 neon_load_reg64(tcg_rm, rm);
6767 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6768 neon_store_reg64(tcg_rd, rd);
6769 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6770 neon_store_reg64(tcg_rd, rd + 1);
6771 tcg_temp_free_i64(tcg_rn);
6772 tcg_temp_free_i64(tcg_rm);
6773 tcg_temp_free_i64(tcg_rd);
6774 return 0;
6775 }
6776
9ee6e8bb
PB
6777 /* Avoid overlapping operands. Wide source operands are
6778 always aligned so will never overlap with wide
6779 destinations in problematic ways. */
8f8e3aa4 6780 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6781 tmp = neon_load_reg(rm, 1);
6782 neon_store_scratch(2, tmp);
8f8e3aa4 6783 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6784 tmp = neon_load_reg(rn, 1);
6785 neon_store_scratch(2, tmp);
9ee6e8bb 6786 }
f764718d 6787 tmp3 = NULL;
9ee6e8bb 6788 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6789 if (src1_wide) {
6790 neon_load_reg64(cpu_V0, rn + pass);
f764718d 6791 tmp = NULL;
9ee6e8bb 6792 } else {
ad69471c 6793 if (pass == 1 && rd == rn) {
dd8fbd78 6794 tmp = neon_load_scratch(2);
9ee6e8bb 6795 } else {
ad69471c
PB
6796 tmp = neon_load_reg(rn, pass);
6797 }
6798 if (prewiden) {
6799 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6800 }
6801 }
ad69471c
PB
6802 if (src2_wide) {
6803 neon_load_reg64(cpu_V1, rm + pass);
f764718d 6804 tmp2 = NULL;
9ee6e8bb 6805 } else {
ad69471c 6806 if (pass == 1 && rd == rm) {
dd8fbd78 6807 tmp2 = neon_load_scratch(2);
9ee6e8bb 6808 } else {
ad69471c
PB
6809 tmp2 = neon_load_reg(rm, pass);
6810 }
6811 if (prewiden) {
6812 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6813 }
9ee6e8bb
PB
6814 }
6815 switch (op) {
6816 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6817 gen_neon_addl(size);
9ee6e8bb 6818 break;
79b0e534 6819 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6820 gen_neon_subl(size);
9ee6e8bb
PB
6821 break;
6822 case 5: case 7: /* VABAL, VABDL */
6823 switch ((size << 1) | u) {
ad69471c
PB
6824 case 0:
6825 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6826 break;
6827 case 1:
6828 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6829 break;
6830 case 2:
6831 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6832 break;
6833 case 3:
6834 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6835 break;
6836 case 4:
6837 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6838 break;
6839 case 5:
6840 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6841 break;
9ee6e8bb
PB
6842 default: abort();
6843 }
7d1b0095
PM
6844 tcg_temp_free_i32(tmp2);
6845 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6846 break;
6847 case 8: case 9: case 10: case 11: case 12: case 13:
6848 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6849 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6850 break;
6851 case 14: /* Polynomial VMULL */
e5ca24cb 6852 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6853 tcg_temp_free_i32(tmp2);
6854 tcg_temp_free_i32(tmp);
e5ca24cb 6855 break;
695272dc
PM
6856 default: /* 15 is RESERVED: caught earlier */
6857 abort();
9ee6e8bb 6858 }
ebcd88ce
PM
6859 if (op == 13) {
6860 /* VQDMULL */
6861 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6862 neon_store_reg64(cpu_V0, rd + pass);
6863 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6864 /* Accumulate. */
ebcd88ce 6865 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6866 switch (op) {
4dc064e6
PM
6867 case 10: /* VMLSL */
6868 gen_neon_negl(cpu_V0, size);
6869 /* Fall through */
6870 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6871 gen_neon_addl(size);
9ee6e8bb
PB
6872 break;
6873 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6874 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6875 if (op == 11) {
6876 gen_neon_negl(cpu_V0, size);
6877 }
ad69471c
PB
6878 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6879 break;
9ee6e8bb
PB
6880 default:
6881 abort();
6882 }
ad69471c 6883 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6884 } else if (op == 4 || op == 6) {
6885 /* Narrowing operation. */
7d1b0095 6886 tmp = tcg_temp_new_i32();
79b0e534 6887 if (!u) {
9ee6e8bb 6888 switch (size) {
ad69471c
PB
6889 case 0:
6890 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6891 break;
6892 case 1:
6893 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6894 break;
6895 case 2:
6896 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6897 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6898 break;
9ee6e8bb
PB
6899 default: abort();
6900 }
6901 } else {
6902 switch (size) {
ad69471c
PB
6903 case 0:
6904 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6905 break;
6906 case 1:
6907 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6908 break;
6909 case 2:
6910 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6911 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6912 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6913 break;
9ee6e8bb
PB
6914 default: abort();
6915 }
6916 }
ad69471c
PB
6917 if (pass == 0) {
6918 tmp3 = tmp;
6919 } else {
6920 neon_store_reg(rd, 0, tmp3);
6921 neon_store_reg(rd, 1, tmp);
6922 }
9ee6e8bb
PB
6923 } else {
6924 /* Write back the result. */
ad69471c 6925 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6926 }
6927 }
6928 } else {
3e3326df
PM
6929 /* Two registers and a scalar. NB that for ops of this form
6930 * the ARM ARM labels bit 24 as Q, but it is in our variable
6931 * 'u', not 'q'.
6932 */
6933 if (size == 0) {
6934 return 1;
6935 }
9ee6e8bb 6936 switch (op) {
9ee6e8bb 6937 case 1: /* Float VMLA scalar */
9ee6e8bb 6938 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6939 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6940 if (size == 1) {
6941 return 1;
6942 }
6943 /* fall through */
6944 case 0: /* Integer VMLA scalar */
6945 case 4: /* Integer VMLS scalar */
6946 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6947 case 12: /* VQDMULH scalar */
6948 case 13: /* VQRDMULH scalar */
3e3326df
PM
6949 if (u && ((rd | rn) & 1)) {
6950 return 1;
6951 }
dd8fbd78
FN
6952 tmp = neon_get_scalar(size, rm);
6953 neon_store_scratch(0, tmp);
9ee6e8bb 6954 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6955 tmp = neon_load_scratch(0);
6956 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6957 if (op == 12) {
6958 if (size == 1) {
02da0b2d 6959 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6960 } else {
02da0b2d 6961 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6962 }
6963 } else if (op == 13) {
6964 if (size == 1) {
02da0b2d 6965 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6966 } else {
02da0b2d 6967 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6968 }
6969 } else if (op & 1) {
aa47cfdd
PM
6970 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6971 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6972 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6973 } else {
6974 switch (size) {
dd8fbd78
FN
6975 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6976 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6977 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6978 default: abort();
9ee6e8bb
PB
6979 }
6980 }
7d1b0095 6981 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6982 if (op < 8) {
6983 /* Accumulate. */
dd8fbd78 6984 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6985 switch (op) {
6986 case 0:
dd8fbd78 6987 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6988 break;
6989 case 1:
aa47cfdd
PM
6990 {
6991 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6992 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6993 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6994 break;
aa47cfdd 6995 }
9ee6e8bb 6996 case 4:
dd8fbd78 6997 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6998 break;
6999 case 5:
aa47cfdd
PM
7000 {
7001 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7002 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
7003 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7004 break;
aa47cfdd 7005 }
9ee6e8bb
PB
7006 default:
7007 abort();
7008 }
7d1b0095 7009 tcg_temp_free_i32(tmp2);
9ee6e8bb 7010 }
dd8fbd78 7011 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7012 }
7013 break;
9ee6e8bb 7014 case 3: /* VQDMLAL scalar */
9ee6e8bb 7015 case 7: /* VQDMLSL scalar */
9ee6e8bb 7016 case 11: /* VQDMULL scalar */
3e3326df 7017 if (u == 1) {
ad69471c 7018 return 1;
3e3326df
PM
7019 }
7020 /* fall through */
7021 case 2: /* VMLAL sclar */
7022 case 6: /* VMLSL scalar */
7023 case 10: /* VMULL scalar */
7024 if (rd & 1) {
7025 return 1;
7026 }
dd8fbd78 7027 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
7028 /* We need a copy of tmp2 because gen_neon_mull
7029 * deletes it during pass 0. */
7d1b0095 7030 tmp4 = tcg_temp_new_i32();
c6067f04 7031 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 7032 tmp3 = neon_load_reg(rn, 1);
ad69471c 7033
9ee6e8bb 7034 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7035 if (pass == 0) {
7036 tmp = neon_load_reg(rn, 0);
9ee6e8bb 7037 } else {
dd8fbd78 7038 tmp = tmp3;
c6067f04 7039 tmp2 = tmp4;
9ee6e8bb 7040 }
ad69471c 7041 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
7042 if (op != 11) {
7043 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7044 }
9ee6e8bb 7045 switch (op) {
4dc064e6
PM
7046 case 6:
7047 gen_neon_negl(cpu_V0, size);
7048 /* Fall through */
7049 case 2:
ad69471c 7050 gen_neon_addl(size);
9ee6e8bb
PB
7051 break;
7052 case 3: case 7:
ad69471c 7053 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7054 if (op == 7) {
7055 gen_neon_negl(cpu_V0, size);
7056 }
ad69471c 7057 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
7058 break;
7059 case 10:
7060 /* no-op */
7061 break;
7062 case 11:
ad69471c 7063 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
7064 break;
7065 default:
7066 abort();
7067 }
ad69471c 7068 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 7069 }
61adacc8
RH
7070 break;
7071 case 14: /* VQRDMLAH scalar */
7072 case 15: /* VQRDMLSH scalar */
7073 {
7074 NeonGenThreeOpEnvFn *fn;
dd8fbd78 7075
61adacc8
RH
7076 if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
7077 return 1;
7078 }
7079 if (u && ((rd | rn) & 1)) {
7080 return 1;
7081 }
7082 if (op == 14) {
7083 if (size == 1) {
7084 fn = gen_helper_neon_qrdmlah_s16;
7085 } else {
7086 fn = gen_helper_neon_qrdmlah_s32;
7087 }
7088 } else {
7089 if (size == 1) {
7090 fn = gen_helper_neon_qrdmlsh_s16;
7091 } else {
7092 fn = gen_helper_neon_qrdmlsh_s32;
7093 }
7094 }
dd8fbd78 7095
61adacc8
RH
7096 tmp2 = neon_get_scalar(size, rm);
7097 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7098 tmp = neon_load_reg(rn, pass);
7099 tmp3 = neon_load_reg(rd, pass);
7100 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7101 tcg_temp_free_i32(tmp3);
7102 neon_store_reg(rd, pass, tmp);
7103 }
7104 tcg_temp_free_i32(tmp2);
7105 }
9ee6e8bb 7106 break;
61adacc8
RH
7107 default:
7108 g_assert_not_reached();
9ee6e8bb
PB
7109 }
7110 }
7111 } else { /* size == 3 */
7112 if (!u) {
7113 /* Extract. */
9ee6e8bb 7114 imm = (insn >> 8) & 0xf;
ad69471c
PB
7115
7116 if (imm > 7 && !q)
7117 return 1;
7118
52579ea1
PM
7119 if (q && ((rd | rn | rm) & 1)) {
7120 return 1;
7121 }
7122
ad69471c
PB
7123 if (imm == 0) {
7124 neon_load_reg64(cpu_V0, rn);
7125 if (q) {
7126 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 7127 }
ad69471c
PB
7128 } else if (imm == 8) {
7129 neon_load_reg64(cpu_V0, rn + 1);
7130 if (q) {
7131 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7132 }
ad69471c 7133 } else if (q) {
a7812ae4 7134 tmp64 = tcg_temp_new_i64();
ad69471c
PB
7135 if (imm < 8) {
7136 neon_load_reg64(cpu_V0, rn);
a7812ae4 7137 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
7138 } else {
7139 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 7140 neon_load_reg64(tmp64, rm);
ad69471c
PB
7141 }
7142 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 7143 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
7144 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7145 if (imm < 8) {
7146 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7147 } else {
ad69471c
PB
7148 neon_load_reg64(cpu_V1, rm + 1);
7149 imm -= 8;
9ee6e8bb 7150 }
ad69471c 7151 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
7152 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7153 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 7154 tcg_temp_free_i64(tmp64);
ad69471c 7155 } else {
a7812ae4 7156 /* BUGFIX */
ad69471c 7157 neon_load_reg64(cpu_V0, rn);
a7812ae4 7158 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 7159 neon_load_reg64(cpu_V1, rm);
a7812ae4 7160 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
7161 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7162 }
7163 neon_store_reg64(cpu_V0, rd);
7164 if (q) {
7165 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
7166 }
7167 } else if ((insn & (1 << 11)) == 0) {
7168 /* Two register misc. */
7169 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7170 size = (insn >> 18) & 3;
600b828c
PM
7171 /* UNDEF for unknown op values and bad op-size combinations */
7172 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7173 return 1;
7174 }
fe8fcf3d
PM
7175 if (neon_2rm_is_v8_op(op) &&
7176 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7177 return 1;
7178 }
fc2a9b37
PM
7179 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7180 q && ((rm | rd) & 1)) {
7181 return 1;
7182 }
9ee6e8bb 7183 switch (op) {
600b828c 7184 case NEON_2RM_VREV64:
9ee6e8bb 7185 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7186 tmp = neon_load_reg(rm, pass * 2);
7187 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7188 switch (size) {
dd8fbd78
FN
7189 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7190 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7191 case 2: /* no-op */ break;
7192 default: abort();
7193 }
dd8fbd78 7194 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7195 if (size == 2) {
dd8fbd78 7196 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7197 } else {
9ee6e8bb 7198 switch (size) {
dd8fbd78
FN
7199 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7200 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7201 default: abort();
7202 }
dd8fbd78 7203 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7204 }
7205 }
7206 break;
600b828c
PM
7207 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7208 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7209 for (pass = 0; pass < q + 1; pass++) {
7210 tmp = neon_load_reg(rm, pass * 2);
7211 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7212 tmp = neon_load_reg(rm, pass * 2 + 1);
7213 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7214 switch (size) {
7215 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7216 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7217 case 2: tcg_gen_add_i64(CPU_V001); break;
7218 default: abort();
7219 }
600b828c 7220 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7221 /* Accumulate. */
ad69471c
PB
7222 neon_load_reg64(cpu_V1, rd + pass);
7223 gen_neon_addl(size);
9ee6e8bb 7224 }
ad69471c 7225 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7226 }
7227 break;
600b828c 7228 case NEON_2RM_VTRN:
9ee6e8bb 7229 if (size == 2) {
a5a14945 7230 int n;
9ee6e8bb 7231 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7232 tmp = neon_load_reg(rm, n);
7233 tmp2 = neon_load_reg(rd, n + 1);
7234 neon_store_reg(rm, n, tmp2);
7235 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7236 }
7237 } else {
7238 goto elementwise;
7239 }
7240 break;
600b828c 7241 case NEON_2RM_VUZP:
02acedf9 7242 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7243 return 1;
9ee6e8bb
PB
7244 }
7245 break;
600b828c 7246 case NEON_2RM_VZIP:
d68a6f3a 7247 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7248 return 1;
9ee6e8bb
PB
7249 }
7250 break;
600b828c
PM
7251 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7252 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7253 if (rm & 1) {
7254 return 1;
7255 }
f764718d 7256 tmp2 = NULL;
9ee6e8bb 7257 for (pass = 0; pass < 2; pass++) {
ad69471c 7258 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7259 tmp = tcg_temp_new_i32();
600b828c
PM
7260 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7261 tmp, cpu_V0);
ad69471c
PB
7262 if (pass == 0) {
7263 tmp2 = tmp;
7264 } else {
7265 neon_store_reg(rd, 0, tmp2);
7266 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7267 }
9ee6e8bb
PB
7268 }
7269 break;
600b828c 7270 case NEON_2RM_VSHLL:
fc2a9b37 7271 if (q || (rd & 1)) {
9ee6e8bb 7272 return 1;
600b828c 7273 }
ad69471c
PB
7274 tmp = neon_load_reg(rm, 0);
7275 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7276 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7277 if (pass == 1)
7278 tmp = tmp2;
7279 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7280 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7281 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7282 }
7283 break;
600b828c 7284 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
7285 {
7286 TCGv_ptr fpst;
7287 TCGv_i32 ahp;
7288
d614a513 7289 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7290 q || (rm & 1)) {
7291 return 1;
7292 }
7d1b0095
PM
7293 tmp = tcg_temp_new_i32();
7294 tmp2 = tcg_temp_new_i32();
486624fc
AB
7295 fpst = get_fpstatus_ptr(true);
7296 ahp = get_ahp_flag();
60011498 7297 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
486624fc 7298 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498 7299 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
486624fc 7300 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7301 tcg_gen_shli_i32(tmp2, tmp2, 16);
7302 tcg_gen_or_i32(tmp2, tmp2, tmp);
7303 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
486624fc 7304 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498
PB
7305 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7306 neon_store_reg(rd, 0, tmp2);
7d1b0095 7307 tmp2 = tcg_temp_new_i32();
486624fc 7308 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7309 tcg_gen_shli_i32(tmp2, tmp2, 16);
7310 tcg_gen_or_i32(tmp2, tmp2, tmp);
7311 neon_store_reg(rd, 1, tmp2);
7d1b0095 7312 tcg_temp_free_i32(tmp);
486624fc
AB
7313 tcg_temp_free_i32(ahp);
7314 tcg_temp_free_ptr(fpst);
60011498 7315 break;
486624fc 7316 }
600b828c 7317 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
7318 {
7319 TCGv_ptr fpst;
7320 TCGv_i32 ahp;
d614a513 7321 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7322 q || (rd & 1)) {
7323 return 1;
7324 }
486624fc
AB
7325 fpst = get_fpstatus_ptr(true);
7326 ahp = get_ahp_flag();
7d1b0095 7327 tmp3 = tcg_temp_new_i32();
60011498
PB
7328 tmp = neon_load_reg(rm, 0);
7329 tmp2 = neon_load_reg(rm, 1);
7330 tcg_gen_ext16u_i32(tmp3, tmp);
486624fc 7331 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7332 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7333 tcg_gen_shri_i32(tmp3, tmp, 16);
486624fc 7334 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7335 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7336 tcg_temp_free_i32(tmp);
60011498 7337 tcg_gen_ext16u_i32(tmp3, tmp2);
486624fc 7338 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7339 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7340 tcg_gen_shri_i32(tmp3, tmp2, 16);
486624fc 7341 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7342 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7343 tcg_temp_free_i32(tmp2);
7344 tcg_temp_free_i32(tmp3);
486624fc
AB
7345 tcg_temp_free_i32(ahp);
7346 tcg_temp_free_ptr(fpst);
60011498 7347 break;
486624fc 7348 }
9d935509 7349 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 7350 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
7351 || ((rm | rd) & 1)) {
7352 return 1;
7353 }
1a66ac61
RH
7354 ptr1 = vfp_reg_ptr(true, rd);
7355 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
7356
7357 /* Bit 6 is the lowest opcode bit; it distinguishes between
7358 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7359 */
7360 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7361
7362 if (op == NEON_2RM_AESE) {
1a66ac61 7363 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 7364 } else {
1a66ac61 7365 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 7366 }
1a66ac61
RH
7367 tcg_temp_free_ptr(ptr1);
7368 tcg_temp_free_ptr(ptr2);
9d935509
AB
7369 tcg_temp_free_i32(tmp3);
7370 break;
f1ecb913 7371 case NEON_2RM_SHA1H:
d614a513 7372 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7373 || ((rm | rd) & 1)) {
7374 return 1;
7375 }
1a66ac61
RH
7376 ptr1 = vfp_reg_ptr(true, rd);
7377 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7378
1a66ac61 7379 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 7380
1a66ac61
RH
7381 tcg_temp_free_ptr(ptr1);
7382 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
7383 break;
7384 case NEON_2RM_SHA1SU1:
7385 if ((rm | rd) & 1) {
7386 return 1;
7387 }
7388 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7389 if (q) {
d614a513 7390 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7391 return 1;
7392 }
d614a513 7393 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7394 return 1;
7395 }
1a66ac61
RH
7396 ptr1 = vfp_reg_ptr(true, rd);
7397 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7398 if (q) {
1a66ac61 7399 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 7400 } else {
1a66ac61 7401 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 7402 }
1a66ac61
RH
7403 tcg_temp_free_ptr(ptr1);
7404 tcg_temp_free_ptr(ptr2);
f1ecb913 7405 break;
9ee6e8bb
PB
7406 default:
7407 elementwise:
7408 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7409 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7410 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7411 neon_reg_offset(rm, pass));
f764718d 7412 tmp = NULL;
9ee6e8bb 7413 } else {
dd8fbd78 7414 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7415 }
7416 switch (op) {
600b828c 7417 case NEON_2RM_VREV32:
9ee6e8bb 7418 switch (size) {
dd8fbd78
FN
7419 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7420 case 1: gen_swap_half(tmp); break;
600b828c 7421 default: abort();
9ee6e8bb
PB
7422 }
7423 break;
600b828c 7424 case NEON_2RM_VREV16:
dd8fbd78 7425 gen_rev16(tmp);
9ee6e8bb 7426 break;
600b828c 7427 case NEON_2RM_VCLS:
9ee6e8bb 7428 switch (size) {
dd8fbd78
FN
7429 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7430 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7431 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7432 default: abort();
9ee6e8bb
PB
7433 }
7434 break;
600b828c 7435 case NEON_2RM_VCLZ:
9ee6e8bb 7436 switch (size) {
dd8fbd78
FN
7437 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7438 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7439 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7440 default: abort();
9ee6e8bb
PB
7441 }
7442 break;
600b828c 7443 case NEON_2RM_VCNT:
dd8fbd78 7444 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7445 break;
600b828c 7446 case NEON_2RM_VMVN:
dd8fbd78 7447 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7448 break;
600b828c 7449 case NEON_2RM_VQABS:
9ee6e8bb 7450 switch (size) {
02da0b2d
PM
7451 case 0:
7452 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7453 break;
7454 case 1:
7455 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7456 break;
7457 case 2:
7458 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7459 break;
600b828c 7460 default: abort();
9ee6e8bb
PB
7461 }
7462 break;
600b828c 7463 case NEON_2RM_VQNEG:
9ee6e8bb 7464 switch (size) {
02da0b2d
PM
7465 case 0:
7466 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7467 break;
7468 case 1:
7469 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7470 break;
7471 case 2:
7472 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7473 break;
600b828c 7474 default: abort();
9ee6e8bb
PB
7475 }
7476 break;
600b828c 7477 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7478 tmp2 = tcg_const_i32(0);
9ee6e8bb 7479 switch(size) {
dd8fbd78
FN
7480 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7481 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7482 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7483 default: abort();
9ee6e8bb 7484 }
39d5492a 7485 tcg_temp_free_i32(tmp2);
600b828c 7486 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7487 tcg_gen_not_i32(tmp, tmp);
600b828c 7488 }
9ee6e8bb 7489 break;
600b828c 7490 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7491 tmp2 = tcg_const_i32(0);
9ee6e8bb 7492 switch(size) {
dd8fbd78
FN
7493 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7494 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7495 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7496 default: abort();
9ee6e8bb 7497 }
39d5492a 7498 tcg_temp_free_i32(tmp2);
600b828c 7499 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7500 tcg_gen_not_i32(tmp, tmp);
600b828c 7501 }
9ee6e8bb 7502 break;
600b828c 7503 case NEON_2RM_VCEQ0:
dd8fbd78 7504 tmp2 = tcg_const_i32(0);
9ee6e8bb 7505 switch(size) {
dd8fbd78
FN
7506 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7507 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7508 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7509 default: abort();
9ee6e8bb 7510 }
39d5492a 7511 tcg_temp_free_i32(tmp2);
9ee6e8bb 7512 break;
600b828c 7513 case NEON_2RM_VABS:
9ee6e8bb 7514 switch(size) {
dd8fbd78
FN
7515 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7516 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7517 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7518 default: abort();
9ee6e8bb
PB
7519 }
7520 break;
600b828c 7521 case NEON_2RM_VNEG:
dd8fbd78
FN
7522 tmp2 = tcg_const_i32(0);
7523 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7524 tcg_temp_free_i32(tmp2);
9ee6e8bb 7525 break;
600b828c 7526 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7527 {
7528 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7529 tmp2 = tcg_const_i32(0);
aa47cfdd 7530 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7531 tcg_temp_free_i32(tmp2);
aa47cfdd 7532 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7533 break;
aa47cfdd 7534 }
600b828c 7535 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7536 {
7537 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7538 tmp2 = tcg_const_i32(0);
aa47cfdd 7539 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7540 tcg_temp_free_i32(tmp2);
aa47cfdd 7541 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7542 break;
aa47cfdd 7543 }
600b828c 7544 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7545 {
7546 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7547 tmp2 = tcg_const_i32(0);
aa47cfdd 7548 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7549 tcg_temp_free_i32(tmp2);
aa47cfdd 7550 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7551 break;
aa47cfdd 7552 }
600b828c 7553 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7554 {
7555 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7556 tmp2 = tcg_const_i32(0);
aa47cfdd 7557 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7558 tcg_temp_free_i32(tmp2);
aa47cfdd 7559 tcg_temp_free_ptr(fpstatus);
0e326109 7560 break;
aa47cfdd 7561 }
600b828c 7562 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7563 {
7564 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7565 tmp2 = tcg_const_i32(0);
aa47cfdd 7566 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7567 tcg_temp_free_i32(tmp2);
aa47cfdd 7568 tcg_temp_free_ptr(fpstatus);
0e326109 7569 break;
aa47cfdd 7570 }
600b828c 7571 case NEON_2RM_VABS_F:
4373f3ce 7572 gen_vfp_abs(0);
9ee6e8bb 7573 break;
600b828c 7574 case NEON_2RM_VNEG_F:
4373f3ce 7575 gen_vfp_neg(0);
9ee6e8bb 7576 break;
600b828c 7577 case NEON_2RM_VSWP:
dd8fbd78
FN
7578 tmp2 = neon_load_reg(rd, pass);
7579 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7580 break;
600b828c 7581 case NEON_2RM_VTRN:
dd8fbd78 7582 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7583 switch (size) {
dd8fbd78
FN
7584 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7585 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7586 default: abort();
9ee6e8bb 7587 }
dd8fbd78 7588 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7589 break;
34f7b0a2
WN
7590 case NEON_2RM_VRINTN:
7591 case NEON_2RM_VRINTA:
7592 case NEON_2RM_VRINTM:
7593 case NEON_2RM_VRINTP:
7594 case NEON_2RM_VRINTZ:
7595 {
7596 TCGv_i32 tcg_rmode;
7597 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7598 int rmode;
7599
7600 if (op == NEON_2RM_VRINTZ) {
7601 rmode = FPROUNDING_ZERO;
7602 } else {
7603 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7604 }
7605
7606 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7607 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7608 cpu_env);
7609 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7610 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7611 cpu_env);
7612 tcg_temp_free_ptr(fpstatus);
7613 tcg_temp_free_i32(tcg_rmode);
7614 break;
7615 }
2ce70625
WN
7616 case NEON_2RM_VRINTX:
7617 {
7618 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7619 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7620 tcg_temp_free_ptr(fpstatus);
7621 break;
7622 }
901ad525
WN
7623 case NEON_2RM_VCVTAU:
7624 case NEON_2RM_VCVTAS:
7625 case NEON_2RM_VCVTNU:
7626 case NEON_2RM_VCVTNS:
7627 case NEON_2RM_VCVTPU:
7628 case NEON_2RM_VCVTPS:
7629 case NEON_2RM_VCVTMU:
7630 case NEON_2RM_VCVTMS:
7631 {
7632 bool is_signed = !extract32(insn, 7, 1);
7633 TCGv_ptr fpst = get_fpstatus_ptr(1);
7634 TCGv_i32 tcg_rmode, tcg_shift;
7635 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7636
7637 tcg_shift = tcg_const_i32(0);
7638 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7639 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7640 cpu_env);
7641
7642 if (is_signed) {
7643 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7644 tcg_shift, fpst);
7645 } else {
7646 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7647 tcg_shift, fpst);
7648 }
7649
7650 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7651 cpu_env);
7652 tcg_temp_free_i32(tcg_rmode);
7653 tcg_temp_free_i32(tcg_shift);
7654 tcg_temp_free_ptr(fpst);
7655 break;
7656 }
600b828c 7657 case NEON_2RM_VRECPE:
b6d4443a
AB
7658 {
7659 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7660 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7661 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7662 break;
b6d4443a 7663 }
600b828c 7664 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7665 {
7666 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7667 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7668 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7669 break;
c2fb418e 7670 }
600b828c 7671 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7672 {
7673 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7674 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7675 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7676 break;
b6d4443a 7677 }
600b828c 7678 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7679 {
7680 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7681 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7682 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7683 break;
c2fb418e 7684 }
600b828c 7685 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7686 gen_vfp_sito(0, 1);
9ee6e8bb 7687 break;
600b828c 7688 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7689 gen_vfp_uito(0, 1);
9ee6e8bb 7690 break;
600b828c 7691 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7692 gen_vfp_tosiz(0, 1);
9ee6e8bb 7693 break;
600b828c 7694 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7695 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7696 break;
7697 default:
600b828c
PM
7698 /* Reserved op values were caught by the
7699 * neon_2rm_sizes[] check earlier.
7700 */
7701 abort();
9ee6e8bb 7702 }
600b828c 7703 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7704 tcg_gen_st_f32(cpu_F0s, cpu_env,
7705 neon_reg_offset(rd, pass));
9ee6e8bb 7706 } else {
dd8fbd78 7707 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7708 }
7709 }
7710 break;
7711 }
7712 } else if ((insn & (1 << 10)) == 0) {
7713 /* VTBL, VTBX. */
56907d77
PM
7714 int n = ((insn >> 8) & 3) + 1;
7715 if ((rn + n) > 32) {
7716 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7717 * helper function running off the end of the register file.
7718 */
7719 return 1;
7720 }
7721 n <<= 3;
9ee6e8bb 7722 if (insn & (1 << 6)) {
8f8e3aa4 7723 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7724 } else {
7d1b0095 7725 tmp = tcg_temp_new_i32();
8f8e3aa4 7726 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7727 }
8f8e3aa4 7728 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 7729 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 7730 tmp5 = tcg_const_i32(n);
e7c06c4e 7731 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 7732 tcg_temp_free_i32(tmp);
9ee6e8bb 7733 if (insn & (1 << 6)) {
8f8e3aa4 7734 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7735 } else {
7d1b0095 7736 tmp = tcg_temp_new_i32();
8f8e3aa4 7737 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7738 }
8f8e3aa4 7739 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 7740 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 7741 tcg_temp_free_i32(tmp5);
e7c06c4e 7742 tcg_temp_free_ptr(ptr1);
8f8e3aa4 7743 neon_store_reg(rd, 0, tmp2);
3018f259 7744 neon_store_reg(rd, 1, tmp3);
7d1b0095 7745 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7746 } else if ((insn & 0x380) == 0) {
7747 /* VDUP */
133da6aa
JR
7748 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7749 return 1;
7750 }
9ee6e8bb 7751 if (insn & (1 << 19)) {
dd8fbd78 7752 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7753 } else {
dd8fbd78 7754 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7755 }
7756 if (insn & (1 << 16)) {
dd8fbd78 7757 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7758 } else if (insn & (1 << 17)) {
7759 if ((insn >> 18) & 1)
dd8fbd78 7760 gen_neon_dup_high16(tmp);
9ee6e8bb 7761 else
dd8fbd78 7762 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7763 }
7764 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7765 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7766 tcg_gen_mov_i32(tmp2, tmp);
7767 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7768 }
7d1b0095 7769 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7770 } else {
7771 return 1;
7772 }
7773 }
7774 }
7775 return 0;
7776}
7777
8b7209fa
RH
7778/* Advanced SIMD three registers of the same length extension.
7779 * 31 25 23 22 20 16 12 11 10 9 8 3 0
7780 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7781 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7782 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7783 */
7784static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
7785{
26c470a7
RH
7786 gen_helper_gvec_3 *fn_gvec = NULL;
7787 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
7788 int rd, rn, rm, opr_sz;
7789 int data = 0;
8b7209fa
RH
7790 bool q;
7791
7792 q = extract32(insn, 6, 1);
7793 VFP_DREG_D(rd, insn);
7794 VFP_DREG_N(rn, insn);
7795 VFP_DREG_M(rm, insn);
7796 if ((rd | rn | rm) & q) {
7797 return 1;
7798 }
7799
7800 if ((insn & 0xfe200f10) == 0xfc200800) {
7801 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
7802 int size = extract32(insn, 20, 1);
7803 data = extract32(insn, 23, 2); /* rot */
8b7209fa
RH
7804 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7805 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7806 return 1;
7807 }
7808 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
7809 } else if ((insn & 0xfea00f10) == 0xfc800800) {
7810 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
7811 int size = extract32(insn, 20, 1);
7812 data = extract32(insn, 24, 1); /* rot */
8b7209fa
RH
7813 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7814 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7815 return 1;
7816 }
7817 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
7818 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
7819 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
7820 bool u = extract32(insn, 4, 1);
7821 if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
7822 return 1;
7823 }
7824 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
8b7209fa
RH
7825 } else {
7826 return 1;
7827 }
7828
7829 if (s->fp_excp_el) {
7830 gen_exception_insn(s, 4, EXCP_UDEF,
7831 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7832 return 0;
7833 }
7834 if (!s->vfp_enabled) {
7835 return 1;
7836 }
7837
7838 opr_sz = (1 + q) * 8;
26c470a7
RH
7839 if (fn_gvec_ptr) {
7840 TCGv_ptr fpst = get_fpstatus_ptr(1);
7841 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7842 vfp_reg_offset(1, rn),
7843 vfp_reg_offset(1, rm), fpst,
7844 opr_sz, opr_sz, data, fn_gvec_ptr);
7845 tcg_temp_free_ptr(fpst);
7846 } else {
7847 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
7848 vfp_reg_offset(1, rn),
7849 vfp_reg_offset(1, rm),
7850 opr_sz, opr_sz, data, fn_gvec);
7851 }
8b7209fa
RH
7852 return 0;
7853}
7854
638808ff
RH
7855/* Advanced SIMD two registers and a scalar extension.
7856 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7857 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7858 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7859 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7860 *
7861 */
7862
7863static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7864{
26c470a7
RH
7865 gen_helper_gvec_3 *fn_gvec = NULL;
7866 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
2cc99919 7867 int rd, rn, rm, opr_sz, data;
638808ff
RH
7868 bool q;
7869
7870 q = extract32(insn, 6, 1);
7871 VFP_DREG_D(rd, insn);
7872 VFP_DREG_N(rn, insn);
638808ff
RH
7873 if ((rd | rn) & q) {
7874 return 1;
7875 }
7876
7877 if ((insn & 0xff000f10) == 0xfe000800) {
7878 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
2cc99919
RH
7879 int rot = extract32(insn, 20, 2);
7880 int size = extract32(insn, 23, 1);
7881 int index;
7882
7883 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
638808ff
RH
7884 return 1;
7885 }
2cc99919
RH
7886 if (size == 0) {
7887 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
7888 return 1;
7889 }
7890 /* For fp16, rm is just Vm, and index is M. */
7891 rm = extract32(insn, 0, 4);
7892 index = extract32(insn, 5, 1);
7893 } else {
7894 /* For fp32, rm is the usual M:Vm, and index is 0. */
7895 VFP_DREG_M(rm, insn);
7896 index = 0;
7897 }
7898 data = (index << 2) | rot;
7899 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
7900 : gen_helper_gvec_fcmlah_idx);
26c470a7
RH
7901 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
7902 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
7903 int u = extract32(insn, 4, 1);
7904 if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
7905 return 1;
7906 }
7907 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
7908 /* rm is just Vm, and index is M. */
7909 data = extract32(insn, 5, 1); /* index */
7910 rm = extract32(insn, 0, 4);
638808ff
RH
7911 } else {
7912 return 1;
7913 }
7914
7915 if (s->fp_excp_el) {
7916 gen_exception_insn(s, 4, EXCP_UDEF,
7917 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7918 return 0;
7919 }
7920 if (!s->vfp_enabled) {
7921 return 1;
7922 }
7923
7924 opr_sz = (1 + q) * 8;
26c470a7
RH
7925 if (fn_gvec_ptr) {
7926 TCGv_ptr fpst = get_fpstatus_ptr(1);
7927 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7928 vfp_reg_offset(1, rn),
7929 vfp_reg_offset(1, rm), fpst,
7930 opr_sz, opr_sz, data, fn_gvec_ptr);
7931 tcg_temp_free_ptr(fpst);
7932 } else {
7933 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
7934 vfp_reg_offset(1, rn),
7935 vfp_reg_offset(1, rm),
7936 opr_sz, opr_sz, data, fn_gvec);
7937 }
638808ff
RH
7938 return 0;
7939}
7940
7dcc1f89 7941static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 7942{
4b6a83fb
PM
7943 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7944 const ARMCPRegInfo *ri;
9ee6e8bb
PB
7945
7946 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
7947
7948 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 7949 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
7950 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7951 return 1;
7952 }
d614a513 7953 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 7954 return disas_iwmmxt_insn(s, insn);
d614a513 7955 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 7956 return disas_dsp_insn(s, insn);
c0f4af17
PM
7957 }
7958 return 1;
4b6a83fb
PM
7959 }
7960
7961 /* Otherwise treat as a generic register access */
7962 is64 = (insn & (1 << 25)) == 0;
7963 if (!is64 && ((insn & (1 << 4)) == 0)) {
7964 /* cdp */
7965 return 1;
7966 }
7967
7968 crm = insn & 0xf;
7969 if (is64) {
7970 crn = 0;
7971 opc1 = (insn >> 4) & 0xf;
7972 opc2 = 0;
7973 rt2 = (insn >> 16) & 0xf;
7974 } else {
7975 crn = (insn >> 16) & 0xf;
7976 opc1 = (insn >> 21) & 7;
7977 opc2 = (insn >> 5) & 7;
7978 rt2 = 0;
7979 }
7980 isread = (insn >> 20) & 1;
7981 rt = (insn >> 12) & 0xf;
7982
60322b39 7983 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 7984 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
7985 if (ri) {
7986 /* Check access permissions */
dcbff19b 7987 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
7988 return 1;
7989 }
7990
c0f4af17 7991 if (ri->accessfn ||
d614a513 7992 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
7993 /* Emit code to perform further access permissions checks at
7994 * runtime; this may result in an exception.
c0f4af17
PM
7995 * Note that on XScale all cp0..c13 registers do an access check
7996 * call in order to handle c15_cpar.
f59df3f2
PM
7997 */
7998 TCGv_ptr tmpptr;
3f208fd7 7999 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
8000 uint32_t syndrome;
8001
8002 /* Note that since we are an implementation which takes an
8003 * exception on a trapped conditional instruction only if the
8004 * instruction passes its condition code check, we can take
8005 * advantage of the clause in the ARM ARM that allows us to set
8006 * the COND field in the instruction to 0xE in all cases.
8007 * We could fish the actual condition out of the insn (ARM)
8008 * or the condexec bits (Thumb) but it isn't necessary.
8009 */
8010 switch (cpnum) {
8011 case 14:
8012 if (is64) {
8013 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8014 isread, false);
8bcbf37c
PM
8015 } else {
8016 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8017 rt, isread, false);
8bcbf37c
PM
8018 }
8019 break;
8020 case 15:
8021 if (is64) {
8022 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8023 isread, false);
8bcbf37c
PM
8024 } else {
8025 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8026 rt, isread, false);
8bcbf37c
PM
8027 }
8028 break;
8029 default:
8030 /* ARMv8 defines that only coprocessors 14 and 15 exist,
8031 * so this can only happen if this is an ARMv7 or earlier CPU,
8032 * in which case the syndrome information won't actually be
8033 * guest visible.
8034 */
d614a513 8035 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
8036 syndrome = syn_uncategorized();
8037 break;
8038 }
8039
43bfa4a1 8040 gen_set_condexec(s);
3977ee5d 8041 gen_set_pc_im(s, s->pc - 4);
f59df3f2 8042 tmpptr = tcg_const_ptr(ri);
8bcbf37c 8043 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
8044 tcg_isread = tcg_const_i32(isread);
8045 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
8046 tcg_isread);
f59df3f2 8047 tcg_temp_free_ptr(tmpptr);
8bcbf37c 8048 tcg_temp_free_i32(tcg_syn);
3f208fd7 8049 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
8050 }
8051
4b6a83fb
PM
8052 /* Handle special cases first */
8053 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
8054 case ARM_CP_NOP:
8055 return 0;
8056 case ARM_CP_WFI:
8057 if (isread) {
8058 return 1;
8059 }
eaed129d 8060 gen_set_pc_im(s, s->pc);
dcba3a8d 8061 s->base.is_jmp = DISAS_WFI;
2bee5105 8062 return 0;
4b6a83fb
PM
8063 default:
8064 break;
8065 }
8066
c5a49c63 8067 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8068 gen_io_start();
8069 }
8070
4b6a83fb
PM
8071 if (isread) {
8072 /* Read */
8073 if (is64) {
8074 TCGv_i64 tmp64;
8075 TCGv_i32 tmp;
8076 if (ri->type & ARM_CP_CONST) {
8077 tmp64 = tcg_const_i64(ri->resetvalue);
8078 } else if (ri->readfn) {
8079 TCGv_ptr tmpptr;
4b6a83fb
PM
8080 tmp64 = tcg_temp_new_i64();
8081 tmpptr = tcg_const_ptr(ri);
8082 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8083 tcg_temp_free_ptr(tmpptr);
8084 } else {
8085 tmp64 = tcg_temp_new_i64();
8086 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8087 }
8088 tmp = tcg_temp_new_i32();
ecc7b3aa 8089 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
8090 store_reg(s, rt, tmp);
8091 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 8092 tmp = tcg_temp_new_i32();
ecc7b3aa 8093 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 8094 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
8095 store_reg(s, rt2, tmp);
8096 } else {
39d5492a 8097 TCGv_i32 tmp;
4b6a83fb
PM
8098 if (ri->type & ARM_CP_CONST) {
8099 tmp = tcg_const_i32(ri->resetvalue);
8100 } else if (ri->readfn) {
8101 TCGv_ptr tmpptr;
4b6a83fb
PM
8102 tmp = tcg_temp_new_i32();
8103 tmpptr = tcg_const_ptr(ri);
8104 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8105 tcg_temp_free_ptr(tmpptr);
8106 } else {
8107 tmp = load_cpu_offset(ri->fieldoffset);
8108 }
8109 if (rt == 15) {
8110 /* Destination register of r15 for 32 bit loads sets
8111 * the condition codes from the high 4 bits of the value
8112 */
8113 gen_set_nzcv(tmp);
8114 tcg_temp_free_i32(tmp);
8115 } else {
8116 store_reg(s, rt, tmp);
8117 }
8118 }
8119 } else {
8120 /* Write */
8121 if (ri->type & ARM_CP_CONST) {
8122 /* If not forbidden by access permissions, treat as WI */
8123 return 0;
8124 }
8125
8126 if (is64) {
39d5492a 8127 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
8128 TCGv_i64 tmp64 = tcg_temp_new_i64();
8129 tmplo = load_reg(s, rt);
8130 tmphi = load_reg(s, rt2);
8131 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8132 tcg_temp_free_i32(tmplo);
8133 tcg_temp_free_i32(tmphi);
8134 if (ri->writefn) {
8135 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
8136 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8137 tcg_temp_free_ptr(tmpptr);
8138 } else {
8139 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8140 }
8141 tcg_temp_free_i64(tmp64);
8142 } else {
8143 if (ri->writefn) {
39d5492a 8144 TCGv_i32 tmp;
4b6a83fb 8145 TCGv_ptr tmpptr;
4b6a83fb
PM
8146 tmp = load_reg(s, rt);
8147 tmpptr = tcg_const_ptr(ri);
8148 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8149 tcg_temp_free_ptr(tmpptr);
8150 tcg_temp_free_i32(tmp);
8151 } else {
39d5492a 8152 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
8153 store_cpu_offset(tmp, ri->fieldoffset);
8154 }
8155 }
2452731c
PM
8156 }
8157
c5a49c63 8158 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8159 /* I/O operations must end the TB here (whether read or write) */
8160 gen_io_end();
8161 gen_lookup_tb(s);
8162 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
8163 /* We default to ending the TB on a coprocessor register write,
8164 * but allow this to be suppressed by the register definition
8165 * (usually only necessary to work around guest bugs).
8166 */
2452731c 8167 gen_lookup_tb(s);
4b6a83fb 8168 }
2452731c 8169
4b6a83fb
PM
8170 return 0;
8171 }
8172
626187d8
PM
8173 /* Unknown register; this might be a guest error or a QEMU
8174 * unimplemented feature.
8175 */
8176 if (is64) {
8177 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8178 "64 bit system register cp:%d opc1: %d crm:%d "
8179 "(%s)\n",
8180 isread ? "read" : "write", cpnum, opc1, crm,
8181 s->ns ? "non-secure" : "secure");
626187d8
PM
8182 } else {
8183 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8184 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8185 "(%s)\n",
8186 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8187 s->ns ? "non-secure" : "secure");
626187d8
PM
8188 }
8189
4a9a539f 8190 return 1;
9ee6e8bb
PB
8191}
8192
5e3f878a
PB
8193
8194/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 8195static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 8196{
39d5492a 8197 TCGv_i32 tmp;
7d1b0095 8198 tmp = tcg_temp_new_i32();
ecc7b3aa 8199 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 8200 store_reg(s, rlow, tmp);
7d1b0095 8201 tmp = tcg_temp_new_i32();
5e3f878a 8202 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 8203 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
8204 store_reg(s, rhigh, tmp);
8205}
8206
8207/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 8208static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 8209{
a7812ae4 8210 TCGv_i64 tmp;
39d5492a 8211 TCGv_i32 tmp2;
5e3f878a 8212
36aa55dc 8213 /* Load value and extend to 64 bits. */
a7812ae4 8214 tmp = tcg_temp_new_i64();
5e3f878a
PB
8215 tmp2 = load_reg(s, rlow);
8216 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 8217 tcg_temp_free_i32(tmp2);
5e3f878a 8218 tcg_gen_add_i64(val, val, tmp);
b75263d6 8219 tcg_temp_free_i64(tmp);
5e3f878a
PB
8220}
8221
8222/* load and add a 64-bit value from a register pair. */
a7812ae4 8223static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 8224{
a7812ae4 8225 TCGv_i64 tmp;
39d5492a
PM
8226 TCGv_i32 tmpl;
8227 TCGv_i32 tmph;
5e3f878a
PB
8228
8229 /* Load 64-bit value rd:rn. */
36aa55dc
PB
8230 tmpl = load_reg(s, rlow);
8231 tmph = load_reg(s, rhigh);
a7812ae4 8232 tmp = tcg_temp_new_i64();
36aa55dc 8233 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
8234 tcg_temp_free_i32(tmpl);
8235 tcg_temp_free_i32(tmph);
5e3f878a 8236 tcg_gen_add_i64(val, val, tmp);
b75263d6 8237 tcg_temp_free_i64(tmp);
5e3f878a
PB
8238}
8239
c9f10124 8240/* Set N and Z flags from hi|lo. */
39d5492a 8241static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 8242{
c9f10124
RH
8243 tcg_gen_mov_i32(cpu_NF, hi);
8244 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
8245}
8246
426f5abc
PB
8247/* Load/Store exclusive instructions are implemented by remembering
8248 the value/address loaded, and seeing if these are the same
354161b3 8249 when the store is performed. This should be sufficient to implement
426f5abc 8250 the architecturally mandated semantics, and avoids having to monitor
354161b3
EC
8251 regular stores. The compare vs the remembered value is done during
8252 the cmpxchg operation, but we must compare the addresses manually. */
426f5abc 8253static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 8254 TCGv_i32 addr, int size)
426f5abc 8255{
94ee24e7 8256 TCGv_i32 tmp = tcg_temp_new_i32();
354161b3 8257 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 8258
50225ad0
PM
8259 s->is_ldex = true;
8260
426f5abc 8261 if (size == 3) {
39d5492a 8262 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 8263 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 8264
3448d47b
PM
8265 /* For AArch32, architecturally the 32-bit word at the lowest
8266 * address is always Rt and the one at addr+4 is Rt2, even if
8267 * the CPU is big-endian. That means we don't want to do a
8268 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8269 * for an architecturally 64-bit access, but instead do a
8270 * 64-bit access using MO_BE if appropriate and then split
8271 * the two halves.
8272 * This only makes a difference for BE32 user-mode, where
8273 * frob64() must not flip the two halves of the 64-bit data
8274 * but this code must treat BE32 user-mode like BE32 system.
8275 */
8276 TCGv taddr = gen_aa32_addr(s, addr, opc);
8277
8278 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8279 tcg_temp_free(taddr);
354161b3 8280 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3448d47b
PM
8281 if (s->be_data == MO_BE) {
8282 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8283 } else {
8284 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8285 }
354161b3
EC
8286 tcg_temp_free_i64(t64);
8287
8288 store_reg(s, rt2, tmp2);
03d05e2d 8289 } else {
354161b3 8290 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 8291 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 8292 }
03d05e2d
PM
8293
8294 store_reg(s, rt, tmp);
8295 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
8296}
8297
/* CLREX: disarm the exclusive monitor.  -1 is never a valid
 * (zero-extended 32-bit) exclusive address, so any subsequent
 * store-exclusive will fail its address comparison.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
8302
426f5abc 8303static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 8304 TCGv_i32 addr, int size)
426f5abc 8305{
354161b3
EC
8306 TCGv_i32 t0, t1, t2;
8307 TCGv_i64 extaddr;
8308 TCGv taddr;
42a268c2
RH
8309 TCGLabel *done_label;
8310 TCGLabel *fail_label;
354161b3 8311 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
8312
8313 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
8314 [addr] = {Rt};
8315 {Rd} = 0;
8316 } else {
8317 {Rd} = 1;
8318 } */
8319 fail_label = gen_new_label();
8320 done_label = gen_new_label();
03d05e2d
PM
8321 extaddr = tcg_temp_new_i64();
8322 tcg_gen_extu_i32_i64(extaddr, addr);
8323 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
8324 tcg_temp_free_i64(extaddr);
8325
354161b3
EC
8326 taddr = gen_aa32_addr(s, addr, opc);
8327 t0 = tcg_temp_new_i32();
8328 t1 = load_reg(s, rt);
426f5abc 8329 if (size == 3) {
354161b3
EC
8330 TCGv_i64 o64 = tcg_temp_new_i64();
8331 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 8332
354161b3 8333 t2 = load_reg(s, rt2);
3448d47b
PM
8334 /* For AArch32, architecturally the 32-bit word at the lowest
8335 * address is always Rt and the one at addr+4 is Rt2, even if
8336 * the CPU is big-endian. Since we're going to treat this as a
8337 * single 64-bit BE store, we need to put the two halves in the
8338 * opposite order for BE to LE, so that they end up in the right
8339 * places.
8340 * We don't want gen_aa32_frob64() because that does the wrong
8341 * thing for BE32 usermode.
8342 */
8343 if (s->be_data == MO_BE) {
8344 tcg_gen_concat_i32_i64(n64, t2, t1);
8345 } else {
8346 tcg_gen_concat_i32_i64(n64, t1, t2);
8347 }
354161b3 8348 tcg_temp_free_i32(t2);
03d05e2d 8349
354161b3
EC
8350 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
8351 get_mem_index(s), opc);
8352 tcg_temp_free_i64(n64);
8353
354161b3
EC
8354 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
8355 tcg_gen_extrl_i64_i32(t0, o64);
8356
8357 tcg_temp_free_i64(o64);
8358 } else {
8359 t2 = tcg_temp_new_i32();
8360 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
8361 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
8362 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
8363 tcg_temp_free_i32(t2);
426f5abc 8364 }
354161b3
EC
8365 tcg_temp_free_i32(t1);
8366 tcg_temp_free(taddr);
8367 tcg_gen_mov_i32(cpu_R[rd], t0);
8368 tcg_temp_free_i32(t0);
426f5abc 8369 tcg_gen_br(done_label);
354161b3 8370
426f5abc
PB
8371 gen_set_label(fail_label);
8372 tcg_gen_movi_i32(cpu_R[rd], 1);
8373 gen_set_label(done_label);
03d05e2d 8374 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 8375}
426f5abc 8376
81465888
PM
/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        /* Trap to EL3: exception routed with target EL 3. */
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset from the banked SP to the first word stored (LR). */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR at [addr] and SPSR at [addr + 4]. */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Net SP adjustment; note addr has already moved by offset + 4,
         * so these deltas bring it to the architecturally final value.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    s->base.is_jmp = DISAS_UPDATE;
}
8503
c2d9644e
RK
/* Generate a label used for skipping this instruction */
static void arm_gen_condlabel(DisasContext *s)
{
    /* Only allocate once per instruction: a second caller reuses the
     * existing label so all conditional skips branch to the same place.
     */
    if (!s->condjmp) {
        s->condlabel = gen_new_label();
        s->condjmp = 1;
    }
}
8512
/* Skip this instruction if the ARM condition is false */
static void arm_skip_unless(DisasContext *s, uint32_t cond)
{
    arm_gen_condlabel(s);
    /* Branch to condlabel when the inverted condition holds, i.e. fall
     * through to the instruction body only when @cond passes.
     */
    arm_gen_test_cc(cond ^ 1, s->condlabel);
}
8519
f4df2210 8520static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8521{
f4df2210 8522 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8523 TCGv_i32 tmp;
8524 TCGv_i32 tmp2;
8525 TCGv_i32 tmp3;
8526 TCGv_i32 addr;
a7812ae4 8527 TCGv_i64 tmp64;
9ee6e8bb 8528
e13886e3
PM
8529 /* M variants do not implement ARM mode; this must raise the INVSTATE
8530 * UsageFault exception.
8531 */
b53d8923 8532 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8533 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8534 default_exception_el(s));
8535 return;
b53d8923 8536 }
9ee6e8bb
PB
8537 cond = insn >> 28;
8538 if (cond == 0xf){
be5e7a76
DES
8539 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8540 * choose to UNDEF. In ARMv5 and above the space is used
8541 * for miscellaneous unconditional instructions.
8542 */
8543 ARCH(5);
8544
9ee6e8bb
PB
8545 /* Unconditional instructions. */
8546 if (((insn >> 25) & 7) == 1) {
8547 /* NEON Data processing. */
d614a513 8548 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8549 goto illegal_op;
d614a513 8550 }
9ee6e8bb 8551
7dcc1f89 8552 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8553 goto illegal_op;
7dcc1f89 8554 }
9ee6e8bb
PB
8555 return;
8556 }
8557 if ((insn & 0x0f100000) == 0x04000000) {
8558 /* NEON load/store. */
d614a513 8559 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8560 goto illegal_op;
d614a513 8561 }
9ee6e8bb 8562
7dcc1f89 8563 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8564 goto illegal_op;
7dcc1f89 8565 }
9ee6e8bb
PB
8566 return;
8567 }
6a57f3eb
WN
8568 if ((insn & 0x0f000e10) == 0x0e000a00) {
8569 /* VFP. */
7dcc1f89 8570 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8571 goto illegal_op;
8572 }
8573 return;
8574 }
3d185e5d
PM
8575 if (((insn & 0x0f30f000) == 0x0510f000) ||
8576 ((insn & 0x0f30f010) == 0x0710f000)) {
8577 if ((insn & (1 << 22)) == 0) {
8578 /* PLDW; v7MP */
d614a513 8579 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8580 goto illegal_op;
8581 }
8582 }
8583 /* Otherwise PLD; v5TE+ */
be5e7a76 8584 ARCH(5TE);
3d185e5d
PM
8585 return;
8586 }
8587 if (((insn & 0x0f70f000) == 0x0450f000) ||
8588 ((insn & 0x0f70f010) == 0x0650f000)) {
8589 ARCH(7);
8590 return; /* PLI; V7 */
8591 }
8592 if (((insn & 0x0f700000) == 0x04100000) ||
8593 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8594 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8595 goto illegal_op;
8596 }
8597 return; /* v7MP: Unallocated memory hint: must NOP */
8598 }
8599
8600 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8601 ARCH(6);
8602 /* setend */
9886ecdf
PB
8603 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8604 gen_helper_setend(cpu_env);
dcba3a8d 8605 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8606 }
8607 return;
8608 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8609 switch ((insn >> 4) & 0xf) {
8610 case 1: /* clrex */
8611 ARCH(6K);
426f5abc 8612 gen_clrex(s);
9ee6e8bb
PB
8613 return;
8614 case 4: /* dsb */
8615 case 5: /* dmb */
9ee6e8bb 8616 ARCH(7);
61e4c432 8617 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8618 return;
6df99dec
SS
8619 case 6: /* isb */
8620 /* We need to break the TB after this insn to execute
8621 * self-modifying code correctly and also to take
8622 * any pending interrupts immediately.
8623 */
0b609cc1 8624 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 8625 return;
9ee6e8bb
PB
8626 default:
8627 goto illegal_op;
8628 }
8629 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8630 /* srs */
81465888
PM
8631 ARCH(6);
8632 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8633 return;
ea825eee 8634 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8635 /* rfe */
c67b6b71 8636 int32_t offset;
9ee6e8bb
PB
8637 if (IS_USER(s))
8638 goto illegal_op;
8639 ARCH(6);
8640 rn = (insn >> 16) & 0xf;
b0109805 8641 addr = load_reg(s, rn);
9ee6e8bb
PB
8642 i = (insn >> 23) & 3;
8643 switch (i) {
b0109805 8644 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8645 case 1: offset = 0; break; /* IA */
8646 case 2: offset = -8; break; /* DB */
b0109805 8647 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8648 default: abort();
8649 }
8650 if (offset)
b0109805
PB
8651 tcg_gen_addi_i32(addr, addr, offset);
8652 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8653 tmp = tcg_temp_new_i32();
12dcc321 8654 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8655 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8656 tmp2 = tcg_temp_new_i32();
12dcc321 8657 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8658 if (insn & (1 << 21)) {
8659 /* Base writeback. */
8660 switch (i) {
b0109805 8661 case 0: offset = -8; break;
c67b6b71
FN
8662 case 1: offset = 4; break;
8663 case 2: offset = -4; break;
b0109805 8664 case 3: offset = 0; break;
9ee6e8bb
PB
8665 default: abort();
8666 }
8667 if (offset)
b0109805
PB
8668 tcg_gen_addi_i32(addr, addr, offset);
8669 store_reg(s, rn, addr);
8670 } else {
7d1b0095 8671 tcg_temp_free_i32(addr);
9ee6e8bb 8672 }
b0109805 8673 gen_rfe(s, tmp, tmp2);
c67b6b71 8674 return;
9ee6e8bb
PB
8675 } else if ((insn & 0x0e000000) == 0x0a000000) {
8676 /* branch link and change to thumb (blx <offset>) */
8677 int32_t offset;
8678
8679 val = (uint32_t)s->pc;
7d1b0095 8680 tmp = tcg_temp_new_i32();
d9ba4830
PB
8681 tcg_gen_movi_i32(tmp, val);
8682 store_reg(s, 14, tmp);
9ee6e8bb
PB
8683 /* Sign-extend the 24-bit offset */
8684 offset = (((int32_t)insn) << 8) >> 8;
8685 /* offset * 4 + bit24 * 2 + (thumb bit) */
8686 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8687 /* pipeline offset */
8688 val += 4;
be5e7a76 8689 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8690 gen_bx_im(s, val);
9ee6e8bb
PB
8691 return;
8692 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8693 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8694 /* iWMMXt register transfer. */
c0f4af17 8695 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8696 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8697 return;
c0f4af17
PM
8698 }
8699 }
9ee6e8bb 8700 }
8b7209fa
RH
8701 } else if ((insn & 0x0e000a00) == 0x0c000800
8702 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8703 if (disas_neon_insn_3same_ext(s, insn)) {
8704 goto illegal_op;
8705 }
8706 return;
638808ff
RH
8707 } else if ((insn & 0x0f000a00) == 0x0e000800
8708 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8709 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
8710 goto illegal_op;
8711 }
8712 return;
9ee6e8bb
PB
8713 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8714 /* Coprocessor double register transfer. */
be5e7a76 8715 ARCH(5TE);
9ee6e8bb
PB
8716 } else if ((insn & 0x0f000010) == 0x0e000010) {
8717 /* Additional coprocessor register transfer. */
7997d92f 8718 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8719 uint32_t mask;
8720 uint32_t val;
8721 /* cps (privileged) */
8722 if (IS_USER(s))
8723 return;
8724 mask = val = 0;
8725 if (insn & (1 << 19)) {
8726 if (insn & (1 << 8))
8727 mask |= CPSR_A;
8728 if (insn & (1 << 7))
8729 mask |= CPSR_I;
8730 if (insn & (1 << 6))
8731 mask |= CPSR_F;
8732 if (insn & (1 << 18))
8733 val |= mask;
8734 }
7997d92f 8735 if (insn & (1 << 17)) {
9ee6e8bb
PB
8736 mask |= CPSR_M;
8737 val |= (insn & 0x1f);
8738 }
8739 if (mask) {
2fbac54b 8740 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8741 }
8742 return;
8743 }
8744 goto illegal_op;
8745 }
8746 if (cond != 0xe) {
8747 /* if not always execute, we generate a conditional jump to
8748 next instruction */
c2d9644e 8749 arm_skip_unless(s, cond);
9ee6e8bb
PB
8750 }
8751 if ((insn & 0x0f900000) == 0x03000000) {
8752 if ((insn & (1 << 21)) == 0) {
8753 ARCH(6T2);
8754 rd = (insn >> 12) & 0xf;
8755 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8756 if ((insn & (1 << 22)) == 0) {
8757 /* MOVW */
7d1b0095 8758 tmp = tcg_temp_new_i32();
5e3f878a 8759 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8760 } else {
8761 /* MOVT */
5e3f878a 8762 tmp = load_reg(s, rd);
86831435 8763 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8764 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8765 }
5e3f878a 8766 store_reg(s, rd, tmp);
9ee6e8bb
PB
8767 } else {
8768 if (((insn >> 12) & 0xf) != 0xf)
8769 goto illegal_op;
8770 if (((insn >> 16) & 0xf) == 0) {
8771 gen_nop_hint(s, insn & 0xff);
8772 } else {
8773 /* CPSR = immediate */
8774 val = insn & 0xff;
8775 shift = ((insn >> 8) & 0xf) * 2;
8776 if (shift)
8777 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8778 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8779 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8780 i, val)) {
9ee6e8bb 8781 goto illegal_op;
7dcc1f89 8782 }
9ee6e8bb
PB
8783 }
8784 }
8785 } else if ((insn & 0x0f900000) == 0x01000000
8786 && (insn & 0x00000090) != 0x00000090) {
8787 /* miscellaneous instructions */
8788 op1 = (insn >> 21) & 3;
8789 sh = (insn >> 4) & 0xf;
8790 rm = insn & 0xf;
8791 switch (sh) {
8bfd0550
PM
8792 case 0x0: /* MSR, MRS */
8793 if (insn & (1 << 9)) {
8794 /* MSR (banked) and MRS (banked) */
8795 int sysm = extract32(insn, 16, 4) |
8796 (extract32(insn, 8, 1) << 4);
8797 int r = extract32(insn, 22, 1);
8798
8799 if (op1 & 1) {
8800 /* MSR (banked) */
8801 gen_msr_banked(s, r, sysm, rm);
8802 } else {
8803 /* MRS (banked) */
8804 int rd = extract32(insn, 12, 4);
8805
8806 gen_mrs_banked(s, r, sysm, rd);
8807 }
8808 break;
8809 }
8810
8811 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8812 if (op1 & 1) {
8813 /* PSR = reg */
2fbac54b 8814 tmp = load_reg(s, rm);
9ee6e8bb 8815 i = ((op1 & 2) != 0);
7dcc1f89 8816 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8817 goto illegal_op;
8818 } else {
8819 /* reg = PSR */
8820 rd = (insn >> 12) & 0xf;
8821 if (op1 & 2) {
8822 if (IS_USER(s))
8823 goto illegal_op;
d9ba4830 8824 tmp = load_cpu_field(spsr);
9ee6e8bb 8825 } else {
7d1b0095 8826 tmp = tcg_temp_new_i32();
9ef39277 8827 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8828 }
d9ba4830 8829 store_reg(s, rd, tmp);
9ee6e8bb
PB
8830 }
8831 break;
8832 case 0x1:
8833 if (op1 == 1) {
8834 /* branch/exchange thumb (bx). */
be5e7a76 8835 ARCH(4T);
d9ba4830
PB
8836 tmp = load_reg(s, rm);
8837 gen_bx(s, tmp);
9ee6e8bb
PB
8838 } else if (op1 == 3) {
8839 /* clz */
be5e7a76 8840 ARCH(5);
9ee6e8bb 8841 rd = (insn >> 12) & 0xf;
1497c961 8842 tmp = load_reg(s, rm);
7539a012 8843 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8844 store_reg(s, rd, tmp);
9ee6e8bb
PB
8845 } else {
8846 goto illegal_op;
8847 }
8848 break;
8849 case 0x2:
8850 if (op1 == 1) {
8851 ARCH(5J); /* bxj */
8852 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8853 tmp = load_reg(s, rm);
8854 gen_bx(s, tmp);
9ee6e8bb
PB
8855 } else {
8856 goto illegal_op;
8857 }
8858 break;
8859 case 0x3:
8860 if (op1 != 1)
8861 goto illegal_op;
8862
be5e7a76 8863 ARCH(5);
9ee6e8bb 8864 /* branch link/exchange thumb (blx) */
d9ba4830 8865 tmp = load_reg(s, rm);
7d1b0095 8866 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8867 tcg_gen_movi_i32(tmp2, s->pc);
8868 store_reg(s, 14, tmp2);
8869 gen_bx(s, tmp);
9ee6e8bb 8870 break;
eb0ecd5a
WN
8871 case 0x4:
8872 {
8873 /* crc32/crc32c */
8874 uint32_t c = extract32(insn, 8, 4);
8875
8876 /* Check this CPU supports ARMv8 CRC instructions.
8877 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8878 * Bits 8, 10 and 11 should be zero.
8879 */
d614a513 8880 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8881 (c & 0xd) != 0) {
8882 goto illegal_op;
8883 }
8884
8885 rn = extract32(insn, 16, 4);
8886 rd = extract32(insn, 12, 4);
8887
8888 tmp = load_reg(s, rn);
8889 tmp2 = load_reg(s, rm);
aa633469
PM
8890 if (op1 == 0) {
8891 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8892 } else if (op1 == 1) {
8893 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8894 }
eb0ecd5a
WN
8895 tmp3 = tcg_const_i32(1 << op1);
8896 if (c & 0x2) {
8897 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8898 } else {
8899 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8900 }
8901 tcg_temp_free_i32(tmp2);
8902 tcg_temp_free_i32(tmp3);
8903 store_reg(s, rd, tmp);
8904 break;
8905 }
9ee6e8bb 8906 case 0x5: /* saturating add/subtract */
be5e7a76 8907 ARCH(5TE);
9ee6e8bb
PB
8908 rd = (insn >> 12) & 0xf;
8909 rn = (insn >> 16) & 0xf;
b40d0353 8910 tmp = load_reg(s, rm);
5e3f878a 8911 tmp2 = load_reg(s, rn);
9ee6e8bb 8912 if (op1 & 2)
9ef39277 8913 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8914 if (op1 & 1)
9ef39277 8915 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8916 else
9ef39277 8917 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8918 tcg_temp_free_i32(tmp2);
5e3f878a 8919 store_reg(s, rd, tmp);
9ee6e8bb 8920 break;
55c544ed
PM
8921 case 0x6: /* ERET */
8922 if (op1 != 3) {
8923 goto illegal_op;
8924 }
8925 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
8926 goto illegal_op;
8927 }
8928 if ((insn & 0x000fff0f) != 0x0000000e) {
8929 /* UNPREDICTABLE; we choose to UNDEF */
8930 goto illegal_op;
8931 }
8932
8933 if (s->current_el == 2) {
8934 tmp = load_cpu_field(elr_el[2]);
8935 } else {
8936 tmp = load_reg(s, 14);
8937 }
8938 gen_exception_return(s, tmp);
8939 break;
49e14940 8940 case 7:
d4a2dc67
PM
8941 {
8942 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8943 switch (op1) {
19a6e31c
PM
8944 case 0:
8945 /* HLT */
8946 gen_hlt(s, imm16);
8947 break;
37e6456e
PM
8948 case 1:
8949 /* bkpt */
8950 ARCH(5);
c900a2e6 8951 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
37e6456e
PM
8952 break;
8953 case 2:
8954 /* Hypervisor call (v7) */
8955 ARCH(7);
8956 if (IS_USER(s)) {
8957 goto illegal_op;
8958 }
8959 gen_hvc(s, imm16);
8960 break;
8961 case 3:
8962 /* Secure monitor call (v6+) */
8963 ARCH(6K);
8964 if (IS_USER(s)) {
8965 goto illegal_op;
8966 }
8967 gen_smc(s);
8968 break;
8969 default:
19a6e31c 8970 g_assert_not_reached();
49e14940 8971 }
9ee6e8bb 8972 break;
d4a2dc67 8973 }
9ee6e8bb
PB
8974 case 0x8: /* signed multiply */
8975 case 0xa:
8976 case 0xc:
8977 case 0xe:
be5e7a76 8978 ARCH(5TE);
9ee6e8bb
PB
8979 rs = (insn >> 8) & 0xf;
8980 rn = (insn >> 12) & 0xf;
8981 rd = (insn >> 16) & 0xf;
8982 if (op1 == 1) {
8983 /* (32 * 16) >> 16 */
5e3f878a
PB
8984 tmp = load_reg(s, rm);
8985 tmp2 = load_reg(s, rs);
9ee6e8bb 8986 if (sh & 4)
5e3f878a 8987 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8988 else
5e3f878a 8989 gen_sxth(tmp2);
a7812ae4
PB
8990 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8991 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8992 tmp = tcg_temp_new_i32();
ecc7b3aa 8993 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8994 tcg_temp_free_i64(tmp64);
9ee6e8bb 8995 if ((sh & 2) == 0) {
5e3f878a 8996 tmp2 = load_reg(s, rn);
9ef39277 8997 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8998 tcg_temp_free_i32(tmp2);
9ee6e8bb 8999 }
5e3f878a 9000 store_reg(s, rd, tmp);
9ee6e8bb
PB
9001 } else {
9002 /* 16 * 16 */
5e3f878a
PB
9003 tmp = load_reg(s, rm);
9004 tmp2 = load_reg(s, rs);
9005 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 9006 tcg_temp_free_i32(tmp2);
9ee6e8bb 9007 if (op1 == 2) {
a7812ae4
PB
9008 tmp64 = tcg_temp_new_i64();
9009 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9010 tcg_temp_free_i32(tmp);
a7812ae4
PB
9011 gen_addq(s, tmp64, rn, rd);
9012 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 9013 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9014 } else {
9015 if (op1 == 0) {
5e3f878a 9016 tmp2 = load_reg(s, rn);
9ef39277 9017 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9018 tcg_temp_free_i32(tmp2);
9ee6e8bb 9019 }
5e3f878a 9020 store_reg(s, rd, tmp);
9ee6e8bb
PB
9021 }
9022 }
9023 break;
9024 default:
9025 goto illegal_op;
9026 }
9027 } else if (((insn & 0x0e000000) == 0 &&
9028 (insn & 0x00000090) != 0x90) ||
9029 ((insn & 0x0e000000) == (1 << 25))) {
9030 int set_cc, logic_cc, shiftop;
9031
9032 op1 = (insn >> 21) & 0xf;
9033 set_cc = (insn >> 20) & 1;
9034 logic_cc = table_logic_cc[op1] & set_cc;
9035
9036 /* data processing instruction */
9037 if (insn & (1 << 25)) {
9038 /* immediate operand */
9039 val = insn & 0xff;
9040 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 9041 if (shift) {
9ee6e8bb 9042 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 9043 }
7d1b0095 9044 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
9045 tcg_gen_movi_i32(tmp2, val);
9046 if (logic_cc && shift) {
9047 gen_set_CF_bit31(tmp2);
9048 }
9ee6e8bb
PB
9049 } else {
9050 /* register */
9051 rm = (insn) & 0xf;
e9bb4aa9 9052 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9053 shiftop = (insn >> 5) & 3;
9054 if (!(insn & (1 << 4))) {
9055 shift = (insn >> 7) & 0x1f;
e9bb4aa9 9056 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
9057 } else {
9058 rs = (insn >> 8) & 0xf;
8984bd2e 9059 tmp = load_reg(s, rs);
e9bb4aa9 9060 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
9061 }
9062 }
9063 if (op1 != 0x0f && op1 != 0x0d) {
9064 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
9065 tmp = load_reg(s, rn);
9066 } else {
f764718d 9067 tmp = NULL;
9ee6e8bb
PB
9068 }
9069 rd = (insn >> 12) & 0xf;
9070 switch(op1) {
9071 case 0x00:
e9bb4aa9
JR
9072 tcg_gen_and_i32(tmp, tmp, tmp2);
9073 if (logic_cc) {
9074 gen_logic_CC(tmp);
9075 }
7dcc1f89 9076 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9077 break;
9078 case 0x01:
e9bb4aa9
JR
9079 tcg_gen_xor_i32(tmp, tmp, tmp2);
9080 if (logic_cc) {
9081 gen_logic_CC(tmp);
9082 }
7dcc1f89 9083 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9084 break;
9085 case 0x02:
9086 if (set_cc && rd == 15) {
9087 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 9088 if (IS_USER(s)) {
9ee6e8bb 9089 goto illegal_op;
e9bb4aa9 9090 }
72485ec4 9091 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 9092 gen_exception_return(s, tmp);
9ee6e8bb 9093 } else {
e9bb4aa9 9094 if (set_cc) {
72485ec4 9095 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9096 } else {
9097 tcg_gen_sub_i32(tmp, tmp, tmp2);
9098 }
7dcc1f89 9099 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9100 }
9101 break;
9102 case 0x03:
e9bb4aa9 9103 if (set_cc) {
72485ec4 9104 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9105 } else {
9106 tcg_gen_sub_i32(tmp, tmp2, tmp);
9107 }
7dcc1f89 9108 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9109 break;
9110 case 0x04:
e9bb4aa9 9111 if (set_cc) {
72485ec4 9112 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9113 } else {
9114 tcg_gen_add_i32(tmp, tmp, tmp2);
9115 }
7dcc1f89 9116 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9117 break;
9118 case 0x05:
e9bb4aa9 9119 if (set_cc) {
49b4c31e 9120 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9121 } else {
9122 gen_add_carry(tmp, tmp, tmp2);
9123 }
7dcc1f89 9124 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9125 break;
9126 case 0x06:
e9bb4aa9 9127 if (set_cc) {
2de68a49 9128 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9129 } else {
9130 gen_sub_carry(tmp, tmp, tmp2);
9131 }
7dcc1f89 9132 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9133 break;
9134 case 0x07:
e9bb4aa9 9135 if (set_cc) {
2de68a49 9136 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9137 } else {
9138 gen_sub_carry(tmp, tmp2, tmp);
9139 }
7dcc1f89 9140 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9141 break;
9142 case 0x08:
9143 if (set_cc) {
e9bb4aa9
JR
9144 tcg_gen_and_i32(tmp, tmp, tmp2);
9145 gen_logic_CC(tmp);
9ee6e8bb 9146 }
7d1b0095 9147 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9148 break;
9149 case 0x09:
9150 if (set_cc) {
e9bb4aa9
JR
9151 tcg_gen_xor_i32(tmp, tmp, tmp2);
9152 gen_logic_CC(tmp);
9ee6e8bb 9153 }
7d1b0095 9154 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9155 break;
9156 case 0x0a:
9157 if (set_cc) {
72485ec4 9158 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 9159 }
7d1b0095 9160 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9161 break;
9162 case 0x0b:
9163 if (set_cc) {
72485ec4 9164 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9165 }
7d1b0095 9166 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9167 break;
9168 case 0x0c:
e9bb4aa9
JR
9169 tcg_gen_or_i32(tmp, tmp, tmp2);
9170 if (logic_cc) {
9171 gen_logic_CC(tmp);
9172 }
7dcc1f89 9173 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9174 break;
9175 case 0x0d:
9176 if (logic_cc && rd == 15) {
9177 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 9178 if (IS_USER(s)) {
9ee6e8bb 9179 goto illegal_op;
e9bb4aa9
JR
9180 }
9181 gen_exception_return(s, tmp2);
9ee6e8bb 9182 } else {
e9bb4aa9
JR
9183 if (logic_cc) {
9184 gen_logic_CC(tmp2);
9185 }
7dcc1f89 9186 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9187 }
9188 break;
9189 case 0x0e:
f669df27 9190 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
9191 if (logic_cc) {
9192 gen_logic_CC(tmp);
9193 }
7dcc1f89 9194 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9195 break;
9196 default:
9197 case 0x0f:
e9bb4aa9
JR
9198 tcg_gen_not_i32(tmp2, tmp2);
9199 if (logic_cc) {
9200 gen_logic_CC(tmp2);
9201 }
7dcc1f89 9202 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9203 break;
9204 }
e9bb4aa9 9205 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 9206 tcg_temp_free_i32(tmp2);
e9bb4aa9 9207 }
9ee6e8bb
PB
9208 } else {
9209 /* other instructions */
9210 op1 = (insn >> 24) & 0xf;
9211 switch(op1) {
9212 case 0x0:
9213 case 0x1:
9214 /* multiplies, extra load/stores */
9215 sh = (insn >> 5) & 3;
9216 if (sh == 0) {
9217 if (op1 == 0x0) {
9218 rd = (insn >> 16) & 0xf;
9219 rn = (insn >> 12) & 0xf;
9220 rs = (insn >> 8) & 0xf;
9221 rm = (insn) & 0xf;
9222 op1 = (insn >> 20) & 0xf;
9223 switch (op1) {
9224 case 0: case 1: case 2: case 3: case 6:
9225 /* 32 bit mul */
5e3f878a
PB
9226 tmp = load_reg(s, rs);
9227 tmp2 = load_reg(s, rm);
9228 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9229 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9230 if (insn & (1 << 22)) {
9231 /* Subtract (mls) */
9232 ARCH(6T2);
5e3f878a
PB
9233 tmp2 = load_reg(s, rn);
9234 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 9235 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9236 } else if (insn & (1 << 21)) {
9237 /* Add */
5e3f878a
PB
9238 tmp2 = load_reg(s, rn);
9239 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9240 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9241 }
9242 if (insn & (1 << 20))
5e3f878a
PB
9243 gen_logic_CC(tmp);
9244 store_reg(s, rd, tmp);
9ee6e8bb 9245 break;
8aac08b1
AJ
9246 case 4:
9247 /* 64 bit mul double accumulate (UMAAL) */
9248 ARCH(6);
9249 tmp = load_reg(s, rs);
9250 tmp2 = load_reg(s, rm);
9251 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9252 gen_addq_lo(s, tmp64, rn);
9253 gen_addq_lo(s, tmp64, rd);
9254 gen_storeq_reg(s, rn, rd, tmp64);
9255 tcg_temp_free_i64(tmp64);
9256 break;
9257 case 8: case 9: case 10: case 11:
9258 case 12: case 13: case 14: case 15:
9259 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
9260 tmp = load_reg(s, rs);
9261 tmp2 = load_reg(s, rm);
8aac08b1 9262 if (insn & (1 << 22)) {
c9f10124 9263 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 9264 } else {
c9f10124 9265 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
9266 }
9267 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
9268 TCGv_i32 al = load_reg(s, rn);
9269 TCGv_i32 ah = load_reg(s, rd);
c9f10124 9270 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
9271 tcg_temp_free_i32(al);
9272 tcg_temp_free_i32(ah);
9ee6e8bb 9273 }
8aac08b1 9274 if (insn & (1 << 20)) {
c9f10124 9275 gen_logicq_cc(tmp, tmp2);
8aac08b1 9276 }
c9f10124
RH
9277 store_reg(s, rn, tmp);
9278 store_reg(s, rd, tmp2);
9ee6e8bb 9279 break;
8aac08b1
AJ
9280 default:
9281 goto illegal_op;
9ee6e8bb
PB
9282 }
9283 } else {
9284 rn = (insn >> 16) & 0xf;
9285 rd = (insn >> 12) & 0xf;
9286 if (insn & (1 << 23)) {
9287 /* load/store exclusive */
2359bf80 9288 int op2 = (insn >> 8) & 3;
86753403 9289 op1 = (insn >> 21) & 0x3;
2359bf80
MR
9290
9291 switch (op2) {
9292 case 0: /* lda/stl */
9293 if (op1 == 1) {
9294 goto illegal_op;
9295 }
9296 ARCH(8);
9297 break;
9298 case 1: /* reserved */
9299 goto illegal_op;
9300 case 2: /* ldaex/stlex */
9301 ARCH(8);
9302 break;
9303 case 3: /* ldrex/strex */
9304 if (op1) {
9305 ARCH(6K);
9306 } else {
9307 ARCH(6);
9308 }
9309 break;
9310 }
9311
3174f8e9 9312 addr = tcg_temp_local_new_i32();
98a46317 9313 load_reg_var(s, addr, rn);
2359bf80
MR
9314
9315 /* Since the emulation does not have barriers,
9316 the acquire/release semantics need no special
9317 handling */
9318 if (op2 == 0) {
9319 if (insn & (1 << 20)) {
9320 tmp = tcg_temp_new_i32();
9321 switch (op1) {
9322 case 0: /* lda */
9bb6558a
PM
9323 gen_aa32_ld32u_iss(s, tmp, addr,
9324 get_mem_index(s),
9325 rd | ISSIsAcqRel);
2359bf80
MR
9326 break;
9327 case 2: /* ldab */
9bb6558a
PM
9328 gen_aa32_ld8u_iss(s, tmp, addr,
9329 get_mem_index(s),
9330 rd | ISSIsAcqRel);
2359bf80
MR
9331 break;
9332 case 3: /* ldah */
9bb6558a
PM
9333 gen_aa32_ld16u_iss(s, tmp, addr,
9334 get_mem_index(s),
9335 rd | ISSIsAcqRel);
2359bf80
MR
9336 break;
9337 default:
9338 abort();
9339 }
9340 store_reg(s, rd, tmp);
9341 } else {
9342 rm = insn & 0xf;
9343 tmp = load_reg(s, rm);
9344 switch (op1) {
9345 case 0: /* stl */
9bb6558a
PM
9346 gen_aa32_st32_iss(s, tmp, addr,
9347 get_mem_index(s),
9348 rm | ISSIsAcqRel);
2359bf80
MR
9349 break;
9350 case 2: /* stlb */
9bb6558a
PM
9351 gen_aa32_st8_iss(s, tmp, addr,
9352 get_mem_index(s),
9353 rm | ISSIsAcqRel);
2359bf80
MR
9354 break;
9355 case 3: /* stlh */
9bb6558a
PM
9356 gen_aa32_st16_iss(s, tmp, addr,
9357 get_mem_index(s),
9358 rm | ISSIsAcqRel);
2359bf80
MR
9359 break;
9360 default:
9361 abort();
9362 }
9363 tcg_temp_free_i32(tmp);
9364 }
9365 } else if (insn & (1 << 20)) {
86753403
PB
9366 switch (op1) {
9367 case 0: /* ldrex */
426f5abc 9368 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
9369 break;
9370 case 1: /* ldrexd */
426f5abc 9371 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
9372 break;
9373 case 2: /* ldrexb */
426f5abc 9374 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
9375 break;
9376 case 3: /* ldrexh */
426f5abc 9377 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
9378 break;
9379 default:
9380 abort();
9381 }
9ee6e8bb
PB
9382 } else {
9383 rm = insn & 0xf;
86753403
PB
9384 switch (op1) {
9385 case 0: /* strex */
426f5abc 9386 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
9387 break;
9388 case 1: /* strexd */
502e64fe 9389 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
9390 break;
9391 case 2: /* strexb */
426f5abc 9392 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
9393 break;
9394 case 3: /* strexh */
426f5abc 9395 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
9396 break;
9397 default:
9398 abort();
9399 }
9ee6e8bb 9400 }
39d5492a 9401 tcg_temp_free_i32(addr);
c4869ca6
OS
9402 } else if ((insn & 0x00300f00) == 0) {
9403 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
9404 * - SWP, SWPB
9405 */
9406
cf12bce0
EC
9407 TCGv taddr;
9408 TCGMemOp opc = s->be_data;
9409
9ee6e8bb
PB
9410 rm = (insn) & 0xf;
9411
9ee6e8bb 9412 if (insn & (1 << 22)) {
cf12bce0 9413 opc |= MO_UB;
9ee6e8bb 9414 } else {
cf12bce0 9415 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 9416 }
cf12bce0
EC
9417
9418 addr = load_reg(s, rn);
9419 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 9420 tcg_temp_free_i32(addr);
cf12bce0
EC
9421
9422 tmp = load_reg(s, rm);
9423 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9424 get_mem_index(s), opc);
9425 tcg_temp_free(taddr);
9426 store_reg(s, rd, tmp);
c4869ca6
OS
9427 } else {
9428 goto illegal_op;
9ee6e8bb
PB
9429 }
9430 }
9431 } else {
9432 int address_offset;
3960c336 9433 bool load = insn & (1 << 20);
63f26fcf
PM
9434 bool wbit = insn & (1 << 21);
9435 bool pbit = insn & (1 << 24);
3960c336 9436 bool doubleword = false;
9bb6558a
PM
9437 ISSInfo issinfo;
9438
9ee6e8bb
PB
9439 /* Misc load/store */
9440 rn = (insn >> 16) & 0xf;
9441 rd = (insn >> 12) & 0xf;
3960c336 9442
9bb6558a
PM
9443 /* ISS not valid if writeback */
9444 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9445
3960c336
PM
9446 if (!load && (sh & 2)) {
9447 /* doubleword */
9448 ARCH(5TE);
9449 if (rd & 1) {
9450 /* UNPREDICTABLE; we choose to UNDEF */
9451 goto illegal_op;
9452 }
9453 load = (sh & 1) == 0;
9454 doubleword = true;
9455 }
9456
b0109805 9457 addr = load_reg(s, rn);
63f26fcf 9458 if (pbit) {
b0109805 9459 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 9460 }
9ee6e8bb 9461 address_offset = 0;
3960c336
PM
9462
9463 if (doubleword) {
9464 if (!load) {
9ee6e8bb 9465 /* store */
b0109805 9466 tmp = load_reg(s, rd);
12dcc321 9467 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9468 tcg_temp_free_i32(tmp);
b0109805
PB
9469 tcg_gen_addi_i32(addr, addr, 4);
9470 tmp = load_reg(s, rd + 1);
12dcc321 9471 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9472 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9473 } else {
9474 /* load */
5a839c0d 9475 tmp = tcg_temp_new_i32();
12dcc321 9476 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9477 store_reg(s, rd, tmp);
9478 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9479 tmp = tcg_temp_new_i32();
12dcc321 9480 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9481 rd++;
9ee6e8bb
PB
9482 }
9483 address_offset = -4;
3960c336
PM
9484 } else if (load) {
9485 /* load */
9486 tmp = tcg_temp_new_i32();
9487 switch (sh) {
9488 case 1:
9bb6558a
PM
9489 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9490 issinfo);
3960c336
PM
9491 break;
9492 case 2:
9bb6558a
PM
9493 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9494 issinfo);
3960c336
PM
9495 break;
9496 default:
9497 case 3:
9bb6558a
PM
9498 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9499 issinfo);
3960c336
PM
9500 break;
9501 }
9ee6e8bb
PB
9502 } else {
9503 /* store */
b0109805 9504 tmp = load_reg(s, rd);
9bb6558a 9505 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9506 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9507 }
9508 /* Perform base writeback before the loaded value to
9509 ensure correct behavior with overlapping index registers.
b6af0975 9510 ldrd with base writeback is undefined if the
9ee6e8bb 9511 destination and index registers overlap. */
63f26fcf 9512 if (!pbit) {
b0109805
PB
9513 gen_add_datah_offset(s, insn, address_offset, addr);
9514 store_reg(s, rn, addr);
63f26fcf 9515 } else if (wbit) {
9ee6e8bb 9516 if (address_offset)
b0109805
PB
9517 tcg_gen_addi_i32(addr, addr, address_offset);
9518 store_reg(s, rn, addr);
9519 } else {
7d1b0095 9520 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9521 }
9522 if (load) {
9523 /* Complete the load. */
b0109805 9524 store_reg(s, rd, tmp);
9ee6e8bb
PB
9525 }
9526 }
9527 break;
9528 case 0x4:
9529 case 0x5:
9530 goto do_ldst;
9531 case 0x6:
9532 case 0x7:
9533 if (insn & (1 << 4)) {
9534 ARCH(6);
9535 /* Armv6 Media instructions. */
9536 rm = insn & 0xf;
9537 rn = (insn >> 16) & 0xf;
2c0262af 9538 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9539 rs = (insn >> 8) & 0xf;
9540 switch ((insn >> 23) & 3) {
9541 case 0: /* Parallel add/subtract. */
9542 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9543 tmp = load_reg(s, rn);
9544 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9545 sh = (insn >> 5) & 7;
9546 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9547 goto illegal_op;
6ddbc6e4 9548 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 9549 tcg_temp_free_i32(tmp2);
6ddbc6e4 9550 store_reg(s, rd, tmp);
9ee6e8bb
PB
9551 break;
9552 case 1:
9553 if ((insn & 0x00700020) == 0) {
6c95676b 9554 /* Halfword pack. */
3670669c
PB
9555 tmp = load_reg(s, rn);
9556 tmp2 = load_reg(s, rm);
9ee6e8bb 9557 shift = (insn >> 7) & 0x1f;
3670669c
PB
9558 if (insn & (1 << 6)) {
9559 /* pkhtb */
22478e79
AZ
9560 if (shift == 0)
9561 shift = 31;
9562 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 9563 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 9564 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
9565 } else {
9566 /* pkhbt */
22478e79
AZ
9567 if (shift)
9568 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 9569 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
9570 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9571 }
9572 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9573 tcg_temp_free_i32(tmp2);
3670669c 9574 store_reg(s, rd, tmp);
9ee6e8bb
PB
9575 } else if ((insn & 0x00200020) == 0x00200000) {
9576 /* [us]sat */
6ddbc6e4 9577 tmp = load_reg(s, rm);
9ee6e8bb
PB
9578 shift = (insn >> 7) & 0x1f;
9579 if (insn & (1 << 6)) {
9580 if (shift == 0)
9581 shift = 31;
6ddbc6e4 9582 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9583 } else {
6ddbc6e4 9584 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9585 }
9586 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9587 tmp2 = tcg_const_i32(sh);
9588 if (insn & (1 << 22))
9ef39277 9589 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9590 else
9ef39277 9591 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9592 tcg_temp_free_i32(tmp2);
6ddbc6e4 9593 store_reg(s, rd, tmp);
9ee6e8bb
PB
9594 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9595 /* [us]sat16 */
6ddbc6e4 9596 tmp = load_reg(s, rm);
9ee6e8bb 9597 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9598 tmp2 = tcg_const_i32(sh);
9599 if (insn & (1 << 22))
9ef39277 9600 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9601 else
9ef39277 9602 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9603 tcg_temp_free_i32(tmp2);
6ddbc6e4 9604 store_reg(s, rd, tmp);
9ee6e8bb
PB
9605 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9606 /* Select bytes. */
6ddbc6e4
PB
9607 tmp = load_reg(s, rn);
9608 tmp2 = load_reg(s, rm);
7d1b0095 9609 tmp3 = tcg_temp_new_i32();
0ecb72a5 9610 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9611 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9612 tcg_temp_free_i32(tmp3);
9613 tcg_temp_free_i32(tmp2);
6ddbc6e4 9614 store_reg(s, rd, tmp);
9ee6e8bb 9615 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9616 tmp = load_reg(s, rm);
9ee6e8bb 9617 shift = (insn >> 10) & 3;
1301f322 9618 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9619 rotate, a shift is sufficient. */
9620 if (shift != 0)
f669df27 9621 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9622 op1 = (insn >> 20) & 7;
9623 switch (op1) {
5e3f878a
PB
9624 case 0: gen_sxtb16(tmp); break;
9625 case 2: gen_sxtb(tmp); break;
9626 case 3: gen_sxth(tmp); break;
9627 case 4: gen_uxtb16(tmp); break;
9628 case 6: gen_uxtb(tmp); break;
9629 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9630 default: goto illegal_op;
9631 }
9632 if (rn != 15) {
5e3f878a 9633 tmp2 = load_reg(s, rn);
9ee6e8bb 9634 if ((op1 & 3) == 0) {
5e3f878a 9635 gen_add16(tmp, tmp2);
9ee6e8bb 9636 } else {
5e3f878a 9637 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9638 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9639 }
9640 }
6c95676b 9641 store_reg(s, rd, tmp);
9ee6e8bb
PB
9642 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9643 /* rev */
b0109805 9644 tmp = load_reg(s, rm);
9ee6e8bb
PB
9645 if (insn & (1 << 22)) {
9646 if (insn & (1 << 7)) {
b0109805 9647 gen_revsh(tmp);
9ee6e8bb
PB
9648 } else {
9649 ARCH(6T2);
b0109805 9650 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9651 }
9652 } else {
9653 if (insn & (1 << 7))
b0109805 9654 gen_rev16(tmp);
9ee6e8bb 9655 else
66896cb8 9656 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9657 }
b0109805 9658 store_reg(s, rd, tmp);
9ee6e8bb
PB
9659 } else {
9660 goto illegal_op;
9661 }
9662 break;
9663 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9664 switch ((insn >> 20) & 0x7) {
9665 case 5:
9666 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9667 /* op2 not 00x or 11x : UNDEF */
9668 goto illegal_op;
9669 }
838fa72d
AJ
9670 /* Signed multiply most significant [accumulate].
9671 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9672 tmp = load_reg(s, rm);
9673 tmp2 = load_reg(s, rs);
a7812ae4 9674 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9675
955a7dd5 9676 if (rd != 15) {
838fa72d 9677 tmp = load_reg(s, rd);
9ee6e8bb 9678 if (insn & (1 << 6)) {
838fa72d 9679 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9680 } else {
838fa72d 9681 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9682 }
9683 }
838fa72d
AJ
9684 if (insn & (1 << 5)) {
9685 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9686 }
9687 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9688 tmp = tcg_temp_new_i32();
ecc7b3aa 9689 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9690 tcg_temp_free_i64(tmp64);
955a7dd5 9691 store_reg(s, rn, tmp);
41e9564d
PM
9692 break;
9693 case 0:
9694 case 4:
9695 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9696 if (insn & (1 << 7)) {
9697 goto illegal_op;
9698 }
9699 tmp = load_reg(s, rm);
9700 tmp2 = load_reg(s, rs);
9ee6e8bb 9701 if (insn & (1 << 5))
5e3f878a
PB
9702 gen_swap_half(tmp2);
9703 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9704 if (insn & (1 << 22)) {
5e3f878a 9705 /* smlald, smlsld */
33bbd75a
PC
9706 TCGv_i64 tmp64_2;
9707
a7812ae4 9708 tmp64 = tcg_temp_new_i64();
33bbd75a 9709 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9710 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9711 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9712 tcg_temp_free_i32(tmp);
33bbd75a
PC
9713 tcg_temp_free_i32(tmp2);
9714 if (insn & (1 << 6)) {
9715 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9716 } else {
9717 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9718 }
9719 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9720 gen_addq(s, tmp64, rd, rn);
9721 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9722 tcg_temp_free_i64(tmp64);
9ee6e8bb 9723 } else {
5e3f878a 9724 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9725 if (insn & (1 << 6)) {
9726 /* This subtraction cannot overflow. */
9727 tcg_gen_sub_i32(tmp, tmp, tmp2);
9728 } else {
9729 /* This addition cannot overflow 32 bits;
9730 * however it may overflow considered as a
9731 * signed operation, in which case we must set
9732 * the Q flag.
9733 */
9734 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9735 }
9736 tcg_temp_free_i32(tmp2);
22478e79 9737 if (rd != 15)
9ee6e8bb 9738 {
22478e79 9739 tmp2 = load_reg(s, rd);
9ef39277 9740 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9741 tcg_temp_free_i32(tmp2);
9ee6e8bb 9742 }
22478e79 9743 store_reg(s, rn, tmp);
9ee6e8bb 9744 }
41e9564d 9745 break;
b8b8ea05
PM
9746 case 1:
9747 case 3:
9748 /* SDIV, UDIV */
d614a513 9749 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9750 goto illegal_op;
9751 }
9752 if (((insn >> 5) & 7) || (rd != 15)) {
9753 goto illegal_op;
9754 }
9755 tmp = load_reg(s, rm);
9756 tmp2 = load_reg(s, rs);
9757 if (insn & (1 << 21)) {
9758 gen_helper_udiv(tmp, tmp, tmp2);
9759 } else {
9760 gen_helper_sdiv(tmp, tmp, tmp2);
9761 }
9762 tcg_temp_free_i32(tmp2);
9763 store_reg(s, rn, tmp);
9764 break;
41e9564d
PM
9765 default:
9766 goto illegal_op;
9ee6e8bb
PB
9767 }
9768 break;
9769 case 3:
9770 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9771 switch (op1) {
9772 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9773 ARCH(6);
9774 tmp = load_reg(s, rm);
9775 tmp2 = load_reg(s, rs);
9776 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9777 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9778 if (rd != 15) {
9779 tmp2 = load_reg(s, rd);
6ddbc6e4 9780 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9781 tcg_temp_free_i32(tmp2);
9ee6e8bb 9782 }
ded9d295 9783 store_reg(s, rn, tmp);
9ee6e8bb
PB
9784 break;
9785 case 0x20: case 0x24: case 0x28: case 0x2c:
9786 /* Bitfield insert/clear. */
9787 ARCH(6T2);
9788 shift = (insn >> 7) & 0x1f;
9789 i = (insn >> 16) & 0x1f;
45140a57
KB
9790 if (i < shift) {
9791 /* UNPREDICTABLE; we choose to UNDEF */
9792 goto illegal_op;
9793 }
9ee6e8bb
PB
9794 i = i + 1 - shift;
9795 if (rm == 15) {
7d1b0095 9796 tmp = tcg_temp_new_i32();
5e3f878a 9797 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9798 } else {
5e3f878a 9799 tmp = load_reg(s, rm);
9ee6e8bb
PB
9800 }
9801 if (i != 32) {
5e3f878a 9802 tmp2 = load_reg(s, rd);
d593c48e 9803 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9804 tcg_temp_free_i32(tmp2);
9ee6e8bb 9805 }
5e3f878a 9806 store_reg(s, rd, tmp);
9ee6e8bb
PB
9807 break;
9808 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9809 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9810 ARCH(6T2);
5e3f878a 9811 tmp = load_reg(s, rm);
9ee6e8bb
PB
9812 shift = (insn >> 7) & 0x1f;
9813 i = ((insn >> 16) & 0x1f) + 1;
9814 if (shift + i > 32)
9815 goto illegal_op;
9816 if (i < 32) {
9817 if (op1 & 0x20) {
59a71b4c 9818 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9819 } else {
59a71b4c 9820 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9821 }
9822 }
5e3f878a 9823 store_reg(s, rd, tmp);
9ee6e8bb
PB
9824 break;
9825 default:
9826 goto illegal_op;
9827 }
9828 break;
9829 }
9830 break;
9831 }
9832 do_ldst:
9833 /* Check for undefined extension instructions
9834 * per the ARM Bible IE:
9835 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9836 */
9837 sh = (0xf << 20) | (0xf << 4);
9838 if (op1 == 0x7 && ((insn & sh) == sh))
9839 {
9840 goto illegal_op;
9841 }
9842 /* load/store byte/word */
9843 rn = (insn >> 16) & 0xf;
9844 rd = (insn >> 12) & 0xf;
b0109805 9845 tmp2 = load_reg(s, rn);
a99caa48
PM
9846 if ((insn & 0x01200000) == 0x00200000) {
9847 /* ldrt/strt */
579d21cc 9848 i = get_a32_user_mem_index(s);
a99caa48
PM
9849 } else {
9850 i = get_mem_index(s);
9851 }
9ee6e8bb 9852 if (insn & (1 << 24))
b0109805 9853 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9854 if (insn & (1 << 20)) {
9855 /* load */
5a839c0d 9856 tmp = tcg_temp_new_i32();
9ee6e8bb 9857 if (insn & (1 << 22)) {
9bb6558a 9858 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9859 } else {
9bb6558a 9860 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9861 }
9ee6e8bb
PB
9862 } else {
9863 /* store */
b0109805 9864 tmp = load_reg(s, rd);
5a839c0d 9865 if (insn & (1 << 22)) {
9bb6558a 9866 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9867 } else {
9bb6558a 9868 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9869 }
9870 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9871 }
9872 if (!(insn & (1 << 24))) {
b0109805
PB
9873 gen_add_data_offset(s, insn, tmp2);
9874 store_reg(s, rn, tmp2);
9875 } else if (insn & (1 << 21)) {
9876 store_reg(s, rn, tmp2);
9877 } else {
7d1b0095 9878 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9879 }
9880 if (insn & (1 << 20)) {
9881 /* Complete the load. */
7dcc1f89 9882 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9883 }
9884 break;
9885 case 0x08:
9886 case 0x09:
9887 {
da3e53dd
PM
9888 int j, n, loaded_base;
9889 bool exc_return = false;
9890 bool is_load = extract32(insn, 20, 1);
9891 bool user = false;
39d5492a 9892 TCGv_i32 loaded_var;
9ee6e8bb
PB
9893 /* load/store multiple words */
9894 /* XXX: store correct base if write back */
9ee6e8bb 9895 if (insn & (1 << 22)) {
da3e53dd 9896 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9897 if (IS_USER(s))
9898 goto illegal_op; /* only usable in supervisor mode */
9899
da3e53dd
PM
9900 if (is_load && extract32(insn, 15, 1)) {
9901 exc_return = true;
9902 } else {
9903 user = true;
9904 }
9ee6e8bb
PB
9905 }
9906 rn = (insn >> 16) & 0xf;
b0109805 9907 addr = load_reg(s, rn);
9ee6e8bb
PB
9908
9909 /* compute total size */
9910 loaded_base = 0;
f764718d 9911 loaded_var = NULL;
9ee6e8bb
PB
9912 n = 0;
9913 for(i=0;i<16;i++) {
9914 if (insn & (1 << i))
9915 n++;
9916 }
9917 /* XXX: test invalid n == 0 case ? */
9918 if (insn & (1 << 23)) {
9919 if (insn & (1 << 24)) {
9920 /* pre increment */
b0109805 9921 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9922 } else {
9923 /* post increment */
9924 }
9925 } else {
9926 if (insn & (1 << 24)) {
9927 /* pre decrement */
b0109805 9928 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9929 } else {
9930 /* post decrement */
9931 if (n != 1)
b0109805 9932 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9933 }
9934 }
9935 j = 0;
9936 for(i=0;i<16;i++) {
9937 if (insn & (1 << i)) {
da3e53dd 9938 if (is_load) {
9ee6e8bb 9939 /* load */
5a839c0d 9940 tmp = tcg_temp_new_i32();
12dcc321 9941 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9942 if (user) {
b75263d6 9943 tmp2 = tcg_const_i32(i);
1ce94f81 9944 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9945 tcg_temp_free_i32(tmp2);
7d1b0095 9946 tcg_temp_free_i32(tmp);
9ee6e8bb 9947 } else if (i == rn) {
b0109805 9948 loaded_var = tmp;
9ee6e8bb 9949 loaded_base = 1;
fb0e8e79
PM
9950 } else if (rn == 15 && exc_return) {
9951 store_pc_exc_ret(s, tmp);
9ee6e8bb 9952 } else {
7dcc1f89 9953 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9954 }
9955 } else {
9956 /* store */
9957 if (i == 15) {
9958 /* special case: r15 = PC + 8 */
9959 val = (long)s->pc + 4;
7d1b0095 9960 tmp = tcg_temp_new_i32();
b0109805 9961 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9962 } else if (user) {
7d1b0095 9963 tmp = tcg_temp_new_i32();
b75263d6 9964 tmp2 = tcg_const_i32(i);
9ef39277 9965 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9966 tcg_temp_free_i32(tmp2);
9ee6e8bb 9967 } else {
b0109805 9968 tmp = load_reg(s, i);
9ee6e8bb 9969 }
12dcc321 9970 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9971 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9972 }
9973 j++;
9974 /* no need to add after the last transfer */
9975 if (j != n)
b0109805 9976 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9977 }
9978 }
9979 if (insn & (1 << 21)) {
9980 /* write back */
9981 if (insn & (1 << 23)) {
9982 if (insn & (1 << 24)) {
9983 /* pre increment */
9984 } else {
9985 /* post increment */
b0109805 9986 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9987 }
9988 } else {
9989 if (insn & (1 << 24)) {
9990 /* pre decrement */
9991 if (n != 1)
b0109805 9992 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9993 } else {
9994 /* post decrement */
b0109805 9995 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9996 }
9997 }
b0109805
PB
9998 store_reg(s, rn, addr);
9999 } else {
7d1b0095 10000 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10001 }
10002 if (loaded_base) {
b0109805 10003 store_reg(s, rn, loaded_var);
9ee6e8bb 10004 }
da3e53dd 10005 if (exc_return) {
9ee6e8bb 10006 /* Restore CPSR from SPSR. */
d9ba4830 10007 tmp = load_cpu_field(spsr);
e69ad9df
AL
10008 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10009 gen_io_start();
10010 }
235ea1f5 10011 gen_helper_cpsr_write_eret(cpu_env, tmp);
e69ad9df
AL
10012 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10013 gen_io_end();
10014 }
7d1b0095 10015 tcg_temp_free_i32(tmp);
b29fd33d 10016 /* Must exit loop to check un-masked IRQs */
dcba3a8d 10017 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
10018 }
10019 }
10020 break;
10021 case 0xa:
10022 case 0xb:
10023 {
10024 int32_t offset;
10025
10026 /* branch (and link) */
10027 val = (int32_t)s->pc;
10028 if (insn & (1 << 24)) {
7d1b0095 10029 tmp = tcg_temp_new_i32();
5e3f878a
PB
10030 tcg_gen_movi_i32(tmp, val);
10031 store_reg(s, 14, tmp);
9ee6e8bb 10032 }
534df156
PM
10033 offset = sextract32(insn << 2, 0, 26);
10034 val += offset + 4;
9ee6e8bb
PB
10035 gen_jmp(s, val);
10036 }
10037 break;
10038 case 0xc:
10039 case 0xd:
10040 case 0xe:
6a57f3eb
WN
10041 if (((insn >> 8) & 0xe) == 10) {
10042 /* VFP. */
7dcc1f89 10043 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10044 goto illegal_op;
10045 }
7dcc1f89 10046 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 10047 /* Coprocessor. */
9ee6e8bb 10048 goto illegal_op;
6a57f3eb 10049 }
9ee6e8bb
PB
10050 break;
10051 case 0xf:
10052 /* swi */
eaed129d 10053 gen_set_pc_im(s, s->pc);
d4a2dc67 10054 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 10055 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
10056 break;
10057 default:
10058 illegal_op:
73710361
GB
10059 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10060 default_exception_el(s));
9ee6e8bb
PB
10061 break;
10062 }
10063 }
10064}
10065
296e5a0a
PM
10066static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
10067{
10068 /* Return true if this is a 16 bit instruction. We must be precise
10069 * about this (matching the decode). We assume that s->pc still
10070 * points to the first 16 bits of the insn.
10071 */
10072 if ((insn >> 11) < 0x1d) {
10073 /* Definitely a 16-bit instruction */
10074 return true;
10075 }
10076
10077 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10078 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10079 * end up actually treating this as two 16-bit insns, though,
10080 * if it's half of a bl/blx pair that might span a page boundary.
10081 */
14120108
JS
10082 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10083 arm_dc_feature(s, ARM_FEATURE_M)) {
296e5a0a
PM
10084 /* Thumb2 cores (including all M profile ones) always treat
10085 * 32-bit insns as 32-bit.
10086 */
10087 return false;
10088 }
10089
bfe7ad5b 10090 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
296e5a0a
PM
10091 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10092 * is not on the next page; we merge this into a 32-bit
10093 * insn.
10094 */
10095 return false;
10096 }
10097 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10098 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10099 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10100 * -- handle as single 16 bit insn
10101 */
10102 return true;
10103}
10104
9ee6e8bb
PB
/* Return true if this is a Thumb-2 logical op. */
static int
thumb2_logic_op(int op)
{
    /* Opcodes 0..7 (AND/BIC/ORR/ORN/EOR, ...) form the logical group;
     * everything from 8 up is arithmetic.
     */
    return op < 8;
}
10111
10112/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
10113 then set condition code flags based on the result of the operation.
10114 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
10115 to the high bit of T1.
10116 Returns zero if the opcode is valid. */
10117
10118static int
39d5492a
PM
10119gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
10120 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
10121{
10122 int logic_cc;
10123
10124 logic_cc = 0;
10125 switch (op) {
10126 case 0: /* and */
396e467c 10127 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
10128 logic_cc = conds;
10129 break;
10130 case 1: /* bic */
f669df27 10131 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
10132 logic_cc = conds;
10133 break;
10134 case 2: /* orr */
396e467c 10135 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
10136 logic_cc = conds;
10137 break;
10138 case 3: /* orn */
29501f1b 10139 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
10140 logic_cc = conds;
10141 break;
10142 case 4: /* eor */
396e467c 10143 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
10144 logic_cc = conds;
10145 break;
10146 case 8: /* add */
10147 if (conds)
72485ec4 10148 gen_add_CC(t0, t0, t1);
9ee6e8bb 10149 else
396e467c 10150 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
10151 break;
10152 case 10: /* adc */
10153 if (conds)
49b4c31e 10154 gen_adc_CC(t0, t0, t1);
9ee6e8bb 10155 else
396e467c 10156 gen_adc(t0, t1);
9ee6e8bb
PB
10157 break;
10158 case 11: /* sbc */
2de68a49
RH
10159 if (conds) {
10160 gen_sbc_CC(t0, t0, t1);
10161 } else {
396e467c 10162 gen_sub_carry(t0, t0, t1);
2de68a49 10163 }
9ee6e8bb
PB
10164 break;
10165 case 13: /* sub */
10166 if (conds)
72485ec4 10167 gen_sub_CC(t0, t0, t1);
9ee6e8bb 10168 else
396e467c 10169 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
10170 break;
10171 case 14: /* rsb */
10172 if (conds)
72485ec4 10173 gen_sub_CC(t0, t1, t0);
9ee6e8bb 10174 else
396e467c 10175 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
10176 break;
10177 default: /* 5, 6, 7, 9, 12, 15. */
10178 return 1;
10179 }
10180 if (logic_cc) {
396e467c 10181 gen_logic_CC(t0);
9ee6e8bb 10182 if (shifter_out)
396e467c 10183 gen_set_CF_bit31(t1);
9ee6e8bb
PB
10184 }
10185 return 0;
10186}
10187
2eea841c
PM
10188/* Translate a 32-bit thumb instruction. */
10189static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 10190{
296e5a0a 10191 uint32_t imm, shift, offset;
9ee6e8bb 10192 uint32_t rd, rn, rm, rs;
39d5492a
PM
10193 TCGv_i32 tmp;
10194 TCGv_i32 tmp2;
10195 TCGv_i32 tmp3;
10196 TCGv_i32 addr;
a7812ae4 10197 TCGv_i64 tmp64;
9ee6e8bb
PB
10198 int op;
10199 int shiftop;
10200 int conds;
10201 int logic_cc;
10202
14120108
JS
10203 /*
10204 * ARMv6-M supports a limited subset of Thumb2 instructions.
10205 * Other Thumb1 architectures allow only 32-bit
10206 * combined BL/BLX prefix and suffix.
296e5a0a 10207 */
14120108
JS
10208 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10209 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10210 int i;
10211 bool found = false;
8297cb13
JS
10212 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10213 0xf3b08040 /* dsb */,
10214 0xf3b08050 /* dmb */,
10215 0xf3b08060 /* isb */,
10216 0xf3e08000 /* mrs */,
10217 0xf000d000 /* bl */};
10218 static const uint32_t armv6m_mask[] = {0xffe0d000,
10219 0xfff0d0f0,
10220 0xfff0d0f0,
10221 0xfff0d0f0,
10222 0xffe0d000,
10223 0xf800d000};
14120108
JS
10224
10225 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10226 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10227 found = true;
10228 break;
10229 }
10230 }
10231 if (!found) {
10232 goto illegal_op;
10233 }
10234 } else if ((insn & 0xf800e800) != 0xf000e800) {
9ee6e8bb
PB
10235 ARCH(6T2);
10236 }
10237
10238 rn = (insn >> 16) & 0xf;
10239 rs = (insn >> 12) & 0xf;
10240 rd = (insn >> 8) & 0xf;
10241 rm = insn & 0xf;
10242 switch ((insn >> 25) & 0xf) {
10243 case 0: case 1: case 2: case 3:
10244 /* 16-bit instructions. Should never happen. */
10245 abort();
10246 case 4:
10247 if (insn & (1 << 22)) {
ebfe27c5
PM
10248 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10249 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 10250 * table branch, TT.
ebfe27c5 10251 */
76eff04d
PM
10252 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10253 arm_dc_feature(s, ARM_FEATURE_V8)) {
10254 /* 0b1110_1001_0111_1111_1110_1001_0111_111
10255 * - SG (v8M only)
10256 * The bulk of the behaviour for this instruction is implemented
10257 * in v7m_handle_execute_nsc(), which deals with the insn when
10258 * it is executed by a CPU in non-secure state from memory
10259 * which is Secure & NonSecure-Callable.
10260 * Here we only need to handle the remaining cases:
10261 * * in NS memory (including the "security extension not
10262 * implemented" case) : NOP
10263 * * in S memory but CPU already secure (clear IT bits)
10264 * We know that the attribute for the memory this insn is
10265 * in must match the current CPU state, because otherwise
10266 * get_phys_addr_pmsav8 would have generated an exception.
10267 */
10268 if (s->v8m_secure) {
10269 /* Like the IT insn, we don't need to generate any code */
10270 s->condexec_cond = 0;
10271 s->condexec_mask = 0;
10272 }
10273 } else if (insn & 0x01200000) {
ebfe27c5
PM
10274 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10275 * - load/store dual (post-indexed)
10276 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10277 * - load/store dual (literal and immediate)
10278 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10279 * - load/store dual (pre-indexed)
10280 */
9ee6e8bb 10281 if (rn == 15) {
ebfe27c5
PM
10282 if (insn & (1 << 21)) {
10283 /* UNPREDICTABLE */
10284 goto illegal_op;
10285 }
7d1b0095 10286 addr = tcg_temp_new_i32();
b0109805 10287 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 10288 } else {
b0109805 10289 addr = load_reg(s, rn);
9ee6e8bb
PB
10290 }
10291 offset = (insn & 0xff) * 4;
10292 if ((insn & (1 << 23)) == 0)
10293 offset = -offset;
10294 if (insn & (1 << 24)) {
b0109805 10295 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
10296 offset = 0;
10297 }
10298 if (insn & (1 << 20)) {
10299 /* ldrd */
e2592fad 10300 tmp = tcg_temp_new_i32();
12dcc321 10301 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
10302 store_reg(s, rs, tmp);
10303 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10304 tmp = tcg_temp_new_i32();
12dcc321 10305 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10306 store_reg(s, rd, tmp);
9ee6e8bb
PB
10307 } else {
10308 /* strd */
b0109805 10309 tmp = load_reg(s, rs);
12dcc321 10310 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10311 tcg_temp_free_i32(tmp);
b0109805
PB
10312 tcg_gen_addi_i32(addr, addr, 4);
10313 tmp = load_reg(s, rd);
12dcc321 10314 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10315 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10316 }
10317 if (insn & (1 << 21)) {
10318 /* Base writeback. */
b0109805
PB
10319 tcg_gen_addi_i32(addr, addr, offset - 4);
10320 store_reg(s, rn, addr);
10321 } else {
7d1b0095 10322 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10323 }
10324 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
10325 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
10326 * - load/store exclusive word
5158de24 10327 * - TT (v8M only)
ebfe27c5
PM
10328 */
10329 if (rs == 15) {
5158de24
PM
10330 if (!(insn & (1 << 20)) &&
10331 arm_dc_feature(s, ARM_FEATURE_M) &&
10332 arm_dc_feature(s, ARM_FEATURE_V8)) {
10333 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
10334 * - TT (v8M only)
10335 */
10336 bool alt = insn & (1 << 7);
10337 TCGv_i32 addr, op, ttresp;
10338
10339 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
10340 /* we UNDEF for these UNPREDICTABLE cases */
10341 goto illegal_op;
10342 }
10343
10344 if (alt && !s->v8m_secure) {
10345 goto illegal_op;
10346 }
10347
10348 addr = load_reg(s, rn);
10349 op = tcg_const_i32(extract32(insn, 6, 2));
10350 ttresp = tcg_temp_new_i32();
10351 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
10352 tcg_temp_free_i32(addr);
10353 tcg_temp_free_i32(op);
10354 store_reg(s, rd, ttresp);
384c6c03 10355 break;
5158de24 10356 }
ebfe27c5
PM
10357 goto illegal_op;
10358 }
39d5492a 10359 addr = tcg_temp_local_new_i32();
98a46317 10360 load_reg_var(s, addr, rn);
426f5abc 10361 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 10362 if (insn & (1 << 20)) {
426f5abc 10363 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 10364 } else {
426f5abc 10365 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 10366 }
39d5492a 10367 tcg_temp_free_i32(addr);
2359bf80 10368 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
10369 /* Table Branch. */
10370 if (rn == 15) {
7d1b0095 10371 addr = tcg_temp_new_i32();
b0109805 10372 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 10373 } else {
b0109805 10374 addr = load_reg(s, rn);
9ee6e8bb 10375 }
b26eefb6 10376 tmp = load_reg(s, rm);
b0109805 10377 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
10378 if (insn & (1 << 4)) {
10379 /* tbh */
b0109805 10380 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10381 tcg_temp_free_i32(tmp);
e2592fad 10382 tmp = tcg_temp_new_i32();
12dcc321 10383 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10384 } else { /* tbb */
7d1b0095 10385 tcg_temp_free_i32(tmp);
e2592fad 10386 tmp = tcg_temp_new_i32();
12dcc321 10387 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10388 }
7d1b0095 10389 tcg_temp_free_i32(addr);
b0109805
PB
10390 tcg_gen_shli_i32(tmp, tmp, 1);
10391 tcg_gen_addi_i32(tmp, tmp, s->pc);
10392 store_reg(s, 15, tmp);
9ee6e8bb 10393 } else {
2359bf80 10394 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 10395 op = (insn >> 4) & 0x3;
2359bf80
MR
10396 switch (op2) {
10397 case 0:
426f5abc 10398 goto illegal_op;
2359bf80
MR
10399 case 1:
10400 /* Load/store exclusive byte/halfword/doubleword */
10401 if (op == 2) {
10402 goto illegal_op;
10403 }
10404 ARCH(7);
10405 break;
10406 case 2:
10407 /* Load-acquire/store-release */
10408 if (op == 3) {
10409 goto illegal_op;
10410 }
10411 /* Fall through */
10412 case 3:
10413 /* Load-acquire/store-release exclusive */
10414 ARCH(8);
10415 break;
426f5abc 10416 }
39d5492a 10417 addr = tcg_temp_local_new_i32();
98a46317 10418 load_reg_var(s, addr, rn);
2359bf80
MR
10419 if (!(op2 & 1)) {
10420 if (insn & (1 << 20)) {
10421 tmp = tcg_temp_new_i32();
10422 switch (op) {
10423 case 0: /* ldab */
9bb6558a
PM
10424 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
10425 rs | ISSIsAcqRel);
2359bf80
MR
10426 break;
10427 case 1: /* ldah */
9bb6558a
PM
10428 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10429 rs | ISSIsAcqRel);
2359bf80
MR
10430 break;
10431 case 2: /* lda */
9bb6558a
PM
10432 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
10433 rs | ISSIsAcqRel);
2359bf80
MR
10434 break;
10435 default:
10436 abort();
10437 }
10438 store_reg(s, rs, tmp);
10439 } else {
10440 tmp = load_reg(s, rs);
10441 switch (op) {
10442 case 0: /* stlb */
9bb6558a
PM
10443 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
10444 rs | ISSIsAcqRel);
2359bf80
MR
10445 break;
10446 case 1: /* stlh */
9bb6558a
PM
10447 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
10448 rs | ISSIsAcqRel);
2359bf80
MR
10449 break;
10450 case 2: /* stl */
9bb6558a
PM
10451 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
10452 rs | ISSIsAcqRel);
2359bf80
MR
10453 break;
10454 default:
10455 abort();
10456 }
10457 tcg_temp_free_i32(tmp);
10458 }
10459 } else if (insn & (1 << 20)) {
426f5abc 10460 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 10461 } else {
426f5abc 10462 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 10463 }
39d5492a 10464 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10465 }
10466 } else {
10467 /* Load/store multiple, RFE, SRS. */
10468 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 10469 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 10470 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10471 goto illegal_op;
00115976 10472 }
9ee6e8bb
PB
10473 if (insn & (1 << 20)) {
10474 /* rfe */
b0109805
PB
10475 addr = load_reg(s, rn);
10476 if ((insn & (1 << 24)) == 0)
10477 tcg_gen_addi_i32(addr, addr, -8);
10478 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 10479 tmp = tcg_temp_new_i32();
12dcc321 10480 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10481 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10482 tmp2 = tcg_temp_new_i32();
12dcc321 10483 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
10484 if (insn & (1 << 21)) {
10485 /* Base writeback. */
b0109805
PB
10486 if (insn & (1 << 24)) {
10487 tcg_gen_addi_i32(addr, addr, 4);
10488 } else {
10489 tcg_gen_addi_i32(addr, addr, -4);
10490 }
10491 store_reg(s, rn, addr);
10492 } else {
7d1b0095 10493 tcg_temp_free_i32(addr);
9ee6e8bb 10494 }
b0109805 10495 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
10496 } else {
10497 /* srs */
81465888
PM
10498 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10499 insn & (1 << 21));
9ee6e8bb
PB
10500 }
10501 } else {
5856d44e 10502 int i, loaded_base = 0;
39d5492a 10503 TCGv_i32 loaded_var;
9ee6e8bb 10504 /* Load/store multiple. */
b0109805 10505 addr = load_reg(s, rn);
9ee6e8bb
PB
10506 offset = 0;
10507 for (i = 0; i < 16; i++) {
10508 if (insn & (1 << i))
10509 offset += 4;
10510 }
10511 if (insn & (1 << 24)) {
b0109805 10512 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10513 }
10514
f764718d 10515 loaded_var = NULL;
9ee6e8bb
PB
10516 for (i = 0; i < 16; i++) {
10517 if ((insn & (1 << i)) == 0)
10518 continue;
10519 if (insn & (1 << 20)) {
10520 /* Load. */
e2592fad 10521 tmp = tcg_temp_new_i32();
12dcc321 10522 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10523 if (i == 15) {
3bb8a96f 10524 gen_bx_excret(s, tmp);
5856d44e
YO
10525 } else if (i == rn) {
10526 loaded_var = tmp;
10527 loaded_base = 1;
9ee6e8bb 10528 } else {
b0109805 10529 store_reg(s, i, tmp);
9ee6e8bb
PB
10530 }
10531 } else {
10532 /* Store. */
b0109805 10533 tmp = load_reg(s, i);
12dcc321 10534 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10535 tcg_temp_free_i32(tmp);
9ee6e8bb 10536 }
b0109805 10537 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 10538 }
5856d44e
YO
10539 if (loaded_base) {
10540 store_reg(s, rn, loaded_var);
10541 }
9ee6e8bb
PB
10542 if (insn & (1 << 21)) {
10543 /* Base register writeback. */
10544 if (insn & (1 << 24)) {
b0109805 10545 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10546 }
10547 /* Fault if writeback register is in register list. */
10548 if (insn & (1 << rn))
10549 goto illegal_op;
b0109805
PB
10550 store_reg(s, rn, addr);
10551 } else {
7d1b0095 10552 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10553 }
10554 }
10555 }
10556 break;
2af9ab77
JB
10557 case 5:
10558
9ee6e8bb 10559 op = (insn >> 21) & 0xf;
2af9ab77 10560 if (op == 6) {
62b44f05
AR
10561 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10562 goto illegal_op;
10563 }
2af9ab77
JB
10564 /* Halfword pack. */
10565 tmp = load_reg(s, rn);
10566 tmp2 = load_reg(s, rm);
10567 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10568 if (insn & (1 << 5)) {
10569 /* pkhtb */
10570 if (shift == 0)
10571 shift = 31;
10572 tcg_gen_sari_i32(tmp2, tmp2, shift);
10573 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10574 tcg_gen_ext16u_i32(tmp2, tmp2);
10575 } else {
10576 /* pkhbt */
10577 if (shift)
10578 tcg_gen_shli_i32(tmp2, tmp2, shift);
10579 tcg_gen_ext16u_i32(tmp, tmp);
10580 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10581 }
10582 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10583 tcg_temp_free_i32(tmp2);
3174f8e9
FN
10584 store_reg(s, rd, tmp);
10585 } else {
2af9ab77
JB
10586 /* Data processing register constant shift. */
10587 if (rn == 15) {
7d1b0095 10588 tmp = tcg_temp_new_i32();
2af9ab77
JB
10589 tcg_gen_movi_i32(tmp, 0);
10590 } else {
10591 tmp = load_reg(s, rn);
10592 }
10593 tmp2 = load_reg(s, rm);
10594
10595 shiftop = (insn >> 4) & 3;
10596 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10597 conds = (insn & (1 << 20)) != 0;
10598 logic_cc = (conds && thumb2_logic_op(op));
10599 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10600 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10601 goto illegal_op;
7d1b0095 10602 tcg_temp_free_i32(tmp2);
55203189
PM
10603 if (rd == 13 &&
10604 ((op == 2 && rn == 15) ||
10605 (op == 8 && rn == 13) ||
10606 (op == 13 && rn == 13))) {
10607 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
10608 store_sp_checked(s, tmp);
10609 } else if (rd != 15) {
2af9ab77
JB
10610 store_reg(s, rd, tmp);
10611 } else {
7d1b0095 10612 tcg_temp_free_i32(tmp);
2af9ab77 10613 }
3174f8e9 10614 }
9ee6e8bb
PB
10615 break;
10616 case 13: /* Misc data processing. */
10617 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10618 if (op < 4 && (insn & 0xf000) != 0xf000)
10619 goto illegal_op;
10620 switch (op) {
10621 case 0: /* Register controlled shift. */
8984bd2e
PB
10622 tmp = load_reg(s, rn);
10623 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10624 if ((insn & 0x70) != 0)
10625 goto illegal_op;
a2d12f0f
PM
10626 /*
10627 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
10628 * - MOV, MOVS (register-shifted register), flagsetting
10629 */
9ee6e8bb 10630 op = (insn >> 21) & 3;
8984bd2e
PB
10631 logic_cc = (insn & (1 << 20)) != 0;
10632 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10633 if (logic_cc)
10634 gen_logic_CC(tmp);
bedb8a6b 10635 store_reg(s, rd, tmp);
9ee6e8bb
PB
10636 break;
10637 case 1: /* Sign/zero extend. */
62b44f05
AR
10638 op = (insn >> 20) & 7;
10639 switch (op) {
10640 case 0: /* SXTAH, SXTH */
10641 case 1: /* UXTAH, UXTH */
10642 case 4: /* SXTAB, SXTB */
10643 case 5: /* UXTAB, UXTB */
10644 break;
10645 case 2: /* SXTAB16, SXTB16 */
10646 case 3: /* UXTAB16, UXTB16 */
10647 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10648 goto illegal_op;
10649 }
10650 break;
10651 default:
10652 goto illegal_op;
10653 }
10654 if (rn != 15) {
10655 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10656 goto illegal_op;
10657 }
10658 }
5e3f878a 10659 tmp = load_reg(s, rm);
9ee6e8bb 10660 shift = (insn >> 4) & 3;
1301f322 10661 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10662 rotate, a shift is sufficient. */
10663 if (shift != 0)
f669df27 10664 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10665 op = (insn >> 20) & 7;
10666 switch (op) {
5e3f878a
PB
10667 case 0: gen_sxth(tmp); break;
10668 case 1: gen_uxth(tmp); break;
10669 case 2: gen_sxtb16(tmp); break;
10670 case 3: gen_uxtb16(tmp); break;
10671 case 4: gen_sxtb(tmp); break;
10672 case 5: gen_uxtb(tmp); break;
62b44f05
AR
10673 default:
10674 g_assert_not_reached();
9ee6e8bb
PB
10675 }
10676 if (rn != 15) {
5e3f878a 10677 tmp2 = load_reg(s, rn);
9ee6e8bb 10678 if ((op >> 1) == 1) {
5e3f878a 10679 gen_add16(tmp, tmp2);
9ee6e8bb 10680 } else {
5e3f878a 10681 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10682 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10683 }
10684 }
5e3f878a 10685 store_reg(s, rd, tmp);
9ee6e8bb
PB
10686 break;
10687 case 2: /* SIMD add/subtract. */
62b44f05
AR
10688 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10689 goto illegal_op;
10690 }
9ee6e8bb
PB
10691 op = (insn >> 20) & 7;
10692 shift = (insn >> 4) & 7;
10693 if ((op & 3) == 3 || (shift & 3) == 3)
10694 goto illegal_op;
6ddbc6e4
PB
10695 tmp = load_reg(s, rn);
10696 tmp2 = load_reg(s, rm);
10697 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10698 tcg_temp_free_i32(tmp2);
6ddbc6e4 10699 store_reg(s, rd, tmp);
9ee6e8bb
PB
10700 break;
10701 case 3: /* Other data processing. */
10702 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10703 if (op < 4) {
10704 /* Saturating add/subtract. */
62b44f05
AR
10705 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10706 goto illegal_op;
10707 }
d9ba4830
PB
10708 tmp = load_reg(s, rn);
10709 tmp2 = load_reg(s, rm);
9ee6e8bb 10710 if (op & 1)
9ef39277 10711 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10712 if (op & 2)
9ef39277 10713 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10714 else
9ef39277 10715 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10716 tcg_temp_free_i32(tmp2);
9ee6e8bb 10717 } else {
62b44f05
AR
10718 switch (op) {
10719 case 0x0a: /* rbit */
10720 case 0x08: /* rev */
10721 case 0x09: /* rev16 */
10722 case 0x0b: /* revsh */
10723 case 0x18: /* clz */
10724 break;
10725 case 0x10: /* sel */
10726 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10727 goto illegal_op;
10728 }
10729 break;
10730 case 0x20: /* crc32/crc32c */
10731 case 0x21:
10732 case 0x22:
10733 case 0x28:
10734 case 0x29:
10735 case 0x2a:
10736 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10737 goto illegal_op;
10738 }
10739 break;
10740 default:
10741 goto illegal_op;
10742 }
d9ba4830 10743 tmp = load_reg(s, rn);
9ee6e8bb
PB
10744 switch (op) {
10745 case 0x0a: /* rbit */
d9ba4830 10746 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10747 break;
10748 case 0x08: /* rev */
66896cb8 10749 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10750 break;
10751 case 0x09: /* rev16 */
d9ba4830 10752 gen_rev16(tmp);
9ee6e8bb
PB
10753 break;
10754 case 0x0b: /* revsh */
d9ba4830 10755 gen_revsh(tmp);
9ee6e8bb
PB
10756 break;
10757 case 0x10: /* sel */
d9ba4830 10758 tmp2 = load_reg(s, rm);
7d1b0095 10759 tmp3 = tcg_temp_new_i32();
0ecb72a5 10760 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10761 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10762 tcg_temp_free_i32(tmp3);
10763 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10764 break;
10765 case 0x18: /* clz */
7539a012 10766 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10767 break;
eb0ecd5a
WN
10768 case 0x20:
10769 case 0x21:
10770 case 0x22:
10771 case 0x28:
10772 case 0x29:
10773 case 0x2a:
10774 {
10775 /* crc32/crc32c */
10776 uint32_t sz = op & 0x3;
10777 uint32_t c = op & 0x8;
10778
eb0ecd5a 10779 tmp2 = load_reg(s, rm);
aa633469
PM
10780 if (sz == 0) {
10781 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10782 } else if (sz == 1) {
10783 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10784 }
eb0ecd5a
WN
10785 tmp3 = tcg_const_i32(1 << sz);
10786 if (c) {
10787 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10788 } else {
10789 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10790 }
10791 tcg_temp_free_i32(tmp2);
10792 tcg_temp_free_i32(tmp3);
10793 break;
10794 }
9ee6e8bb 10795 default:
62b44f05 10796 g_assert_not_reached();
9ee6e8bb
PB
10797 }
10798 }
d9ba4830 10799 store_reg(s, rd, tmp);
9ee6e8bb
PB
10800 break;
10801 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10802 switch ((insn >> 20) & 7) {
10803 case 0: /* 32 x 32 -> 32 */
10804 case 7: /* Unsigned sum of absolute differences. */
10805 break;
10806 case 1: /* 16 x 16 -> 32 */
10807 case 2: /* Dual multiply add. */
10808 case 3: /* 32 * 16 -> 32msb */
10809 case 4: /* Dual multiply subtract. */
10810 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10811 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10812 goto illegal_op;
10813 }
10814 break;
10815 }
9ee6e8bb 10816 op = (insn >> 4) & 0xf;
d9ba4830
PB
10817 tmp = load_reg(s, rn);
10818 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10819 switch ((insn >> 20) & 7) {
10820 case 0: /* 32 x 32 -> 32 */
d9ba4830 10821 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10822 tcg_temp_free_i32(tmp2);
9ee6e8bb 10823 if (rs != 15) {
d9ba4830 10824 tmp2 = load_reg(s, rs);
9ee6e8bb 10825 if (op)
d9ba4830 10826 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10827 else
d9ba4830 10828 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10829 tcg_temp_free_i32(tmp2);
9ee6e8bb 10830 }
9ee6e8bb
PB
10831 break;
10832 case 1: /* 16 x 16 -> 32 */
d9ba4830 10833 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10834 tcg_temp_free_i32(tmp2);
9ee6e8bb 10835 if (rs != 15) {
d9ba4830 10836 tmp2 = load_reg(s, rs);
9ef39277 10837 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10838 tcg_temp_free_i32(tmp2);
9ee6e8bb 10839 }
9ee6e8bb
PB
10840 break;
10841 case 2: /* Dual multiply add. */
10842 case 4: /* Dual multiply subtract. */
10843 if (op)
d9ba4830
PB
10844 gen_swap_half(tmp2);
10845 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10846 if (insn & (1 << 22)) {
e1d177b9 10847 /* This subtraction cannot overflow. */
d9ba4830 10848 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10849 } else {
e1d177b9
PM
10850 /* This addition cannot overflow 32 bits;
10851 * however it may overflow considered as a signed
10852 * operation, in which case we must set the Q flag.
10853 */
9ef39277 10854 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10855 }
7d1b0095 10856 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10857 if (rs != 15)
10858 {
d9ba4830 10859 tmp2 = load_reg(s, rs);
9ef39277 10860 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10861 tcg_temp_free_i32(tmp2);
9ee6e8bb 10862 }
9ee6e8bb
PB
10863 break;
10864 case 3: /* 32 * 16 -> 32msb */
10865 if (op)
d9ba4830 10866 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10867 else
d9ba4830 10868 gen_sxth(tmp2);
a7812ae4
PB
10869 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10870 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10871 tmp = tcg_temp_new_i32();
ecc7b3aa 10872 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10873 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10874 if (rs != 15)
10875 {
d9ba4830 10876 tmp2 = load_reg(s, rs);
9ef39277 10877 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10878 tcg_temp_free_i32(tmp2);
9ee6e8bb 10879 }
9ee6e8bb 10880 break;
838fa72d
AJ
10881 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10882 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10883 if (rs != 15) {
838fa72d
AJ
10884 tmp = load_reg(s, rs);
10885 if (insn & (1 << 20)) {
10886 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10887 } else {
838fa72d 10888 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10889 }
2c0262af 10890 }
838fa72d
AJ
10891 if (insn & (1 << 4)) {
10892 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10893 }
10894 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10895 tmp = tcg_temp_new_i32();
ecc7b3aa 10896 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10897 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10898 break;
10899 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10900 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10901 tcg_temp_free_i32(tmp2);
9ee6e8bb 10902 if (rs != 15) {
d9ba4830
PB
10903 tmp2 = load_reg(s, rs);
10904 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10905 tcg_temp_free_i32(tmp2);
5fd46862 10906 }
9ee6e8bb 10907 break;
2c0262af 10908 }
d9ba4830 10909 store_reg(s, rd, tmp);
2c0262af 10910 break;
9ee6e8bb
PB
10911 case 6: case 7: /* 64-bit multiply, Divide. */
10912 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10913 tmp = load_reg(s, rn);
10914 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10915 if ((op & 0x50) == 0x10) {
10916 /* sdiv, udiv */
d614a513 10917 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10918 goto illegal_op;
47789990 10919 }
9ee6e8bb 10920 if (op & 0x20)
5e3f878a 10921 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10922 else
5e3f878a 10923 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10924 tcg_temp_free_i32(tmp2);
5e3f878a 10925 store_reg(s, rd, tmp);
9ee6e8bb
PB
10926 } else if ((op & 0xe) == 0xc) {
10927 /* Dual multiply accumulate long. */
62b44f05
AR
10928 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10929 tcg_temp_free_i32(tmp);
10930 tcg_temp_free_i32(tmp2);
10931 goto illegal_op;
10932 }
9ee6e8bb 10933 if (op & 1)
5e3f878a
PB
10934 gen_swap_half(tmp2);
10935 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10936 if (op & 0x10) {
5e3f878a 10937 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10938 } else {
5e3f878a 10939 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10940 }
7d1b0095 10941 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10942 /* BUGFIX */
10943 tmp64 = tcg_temp_new_i64();
10944 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10945 tcg_temp_free_i32(tmp);
a7812ae4
PB
10946 gen_addq(s, tmp64, rs, rd);
10947 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10948 tcg_temp_free_i64(tmp64);
2c0262af 10949 } else {
9ee6e8bb
PB
10950 if (op & 0x20) {
10951 /* Unsigned 64-bit multiply */
a7812ae4 10952 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10953 } else {
9ee6e8bb
PB
10954 if (op & 8) {
10955 /* smlalxy */
62b44f05
AR
10956 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10957 tcg_temp_free_i32(tmp2);
10958 tcg_temp_free_i32(tmp);
10959 goto illegal_op;
10960 }
5e3f878a 10961 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10962 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10963 tmp64 = tcg_temp_new_i64();
10964 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10965 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10966 } else {
10967 /* Signed 64-bit multiply */
a7812ae4 10968 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10969 }
b5ff1b31 10970 }
9ee6e8bb
PB
10971 if (op & 4) {
10972 /* umaal */
62b44f05
AR
10973 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10974 tcg_temp_free_i64(tmp64);
10975 goto illegal_op;
10976 }
a7812ae4
PB
10977 gen_addq_lo(s, tmp64, rs);
10978 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10979 } else if (op & 0x40) {
10980 /* 64-bit accumulate. */
a7812ae4 10981 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10982 }
a7812ae4 10983 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10984 tcg_temp_free_i64(tmp64);
5fd46862 10985 }
2c0262af 10986 break;
9ee6e8bb
PB
10987 }
10988 break;
10989 case 6: case 7: case 14: case 15:
10990 /* Coprocessor. */
7517748e
PM
10991 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10992 /* We don't currently implement M profile FP support,
b1e5336a
PM
10993 * so this entire space should give a NOCP fault, with
10994 * the exception of the v8M VLLDM and VLSTM insns, which
10995 * must be NOPs in Secure state and UNDEF in Nonsecure state.
7517748e 10996 */
b1e5336a
PM
10997 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10998 (insn & 0xffa00f00) == 0xec200a00) {
10999 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
11000 * - VLLDM, VLSTM
11001 * We choose to UNDEF if the RAZ bits are non-zero.
11002 */
11003 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
11004 goto illegal_op;
11005 }
11006 /* Just NOP since FP support is not implemented */
11007 break;
11008 }
11009 /* All other insns: NOCP */
7517748e
PM
11010 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
11011 default_exception_el(s));
11012 break;
11013 }
0052087e
RH
11014 if ((insn & 0xfe000a00) == 0xfc000800
11015 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11016 /* The Thumb2 and ARM encodings are identical. */
11017 if (disas_neon_insn_3same_ext(s, insn)) {
11018 goto illegal_op;
11019 }
11020 } else if ((insn & 0xff000a00) == 0xfe000800
11021 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11022 /* The Thumb2 and ARM encodings are identical. */
11023 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
11024 goto illegal_op;
11025 }
11026 } else if (((insn >> 24) & 3) == 3) {
9ee6e8bb 11027 /* Translate into the equivalent ARM encoding. */
f06053e3 11028 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 11029 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 11030 goto illegal_op;
7dcc1f89 11031 }
6a57f3eb 11032 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 11033 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
11034 goto illegal_op;
11035 }
9ee6e8bb
PB
11036 } else {
11037 if (insn & (1 << 28))
11038 goto illegal_op;
7dcc1f89 11039 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 11040 goto illegal_op;
7dcc1f89 11041 }
9ee6e8bb
PB
11042 }
11043 break;
11044 case 8: case 9: case 10: case 11:
11045 if (insn & (1 << 15)) {
11046 /* Branches, misc control. */
11047 if (insn & 0x5000) {
11048 /* Unconditional branch. */
11049 /* signextend(hw1[10:0]) -> offset[:12]. */
11050 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
11051 /* hw1[10:0] -> offset[11:1]. */
11052 offset |= (insn & 0x7ff) << 1;
11053 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
11054 offset[24:22] already have the same value because of the
11055 sign extension above. */
11056 offset ^= ((~insn) & (1 << 13)) << 10;
11057 offset ^= ((~insn) & (1 << 11)) << 11;
11058
9ee6e8bb
PB
11059 if (insn & (1 << 14)) {
11060 /* Branch and link. */
3174f8e9 11061 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 11062 }
3b46e624 11063
b0109805 11064 offset += s->pc;
9ee6e8bb
PB
11065 if (insn & (1 << 12)) {
11066 /* b/bl */
b0109805 11067 gen_jmp(s, offset);
9ee6e8bb
PB
11068 } else {
11069 /* blx */
b0109805 11070 offset &= ~(uint32_t)2;
be5e7a76 11071 /* thumb2 bx, no need to check */
b0109805 11072 gen_bx_im(s, offset);
2c0262af 11073 }
9ee6e8bb
PB
11074 } else if (((insn >> 23) & 7) == 7) {
11075 /* Misc control */
11076 if (insn & (1 << 13))
11077 goto illegal_op;
11078
11079 if (insn & (1 << 26)) {
001b3cab
PM
11080 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11081 goto illegal_op;
11082 }
37e6456e
PM
11083 if (!(insn & (1 << 20))) {
11084 /* Hypervisor call (v7) */
11085 int imm16 = extract32(insn, 16, 4) << 12
11086 | extract32(insn, 0, 12);
11087 ARCH(7);
11088 if (IS_USER(s)) {
11089 goto illegal_op;
11090 }
11091 gen_hvc(s, imm16);
11092 } else {
11093 /* Secure monitor call (v6+) */
11094 ARCH(6K);
11095 if (IS_USER(s)) {
11096 goto illegal_op;
11097 }
11098 gen_smc(s);
11099 }
2c0262af 11100 } else {
9ee6e8bb
PB
11101 op = (insn >> 20) & 7;
11102 switch (op) {
11103 case 0: /* msr cpsr. */
b53d8923 11104 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 11105 tmp = load_reg(s, rn);
b28b3377
PM
11106 /* the constant is the mask and SYSm fields */
11107 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 11108 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 11109 tcg_temp_free_i32(addr);
7d1b0095 11110 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11111 gen_lookup_tb(s);
11112 break;
11113 }
11114 /* fall through */
11115 case 1: /* msr spsr. */
b53d8923 11116 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11117 goto illegal_op;
b53d8923 11118 }
8bfd0550
PM
11119
11120 if (extract32(insn, 5, 1)) {
11121 /* MSR (banked) */
11122 int sysm = extract32(insn, 8, 4) |
11123 (extract32(insn, 4, 1) << 4);
11124 int r = op & 1;
11125
11126 gen_msr_banked(s, r, sysm, rm);
11127 break;
11128 }
11129
11130 /* MSR (for PSRs) */
2fbac54b
FN
11131 tmp = load_reg(s, rn);
11132 if (gen_set_psr(s,
7dcc1f89 11133 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 11134 op == 1, tmp))
9ee6e8bb
PB
11135 goto illegal_op;
11136 break;
11137 case 2: /* cps, nop-hint. */
11138 if (((insn >> 8) & 7) == 0) {
11139 gen_nop_hint(s, insn & 0xff);
11140 }
11141 /* Implemented as NOP in user mode. */
11142 if (IS_USER(s))
11143 break;
11144 offset = 0;
11145 imm = 0;
11146 if (insn & (1 << 10)) {
11147 if (insn & (1 << 7))
11148 offset |= CPSR_A;
11149 if (insn & (1 << 6))
11150 offset |= CPSR_I;
11151 if (insn & (1 << 5))
11152 offset |= CPSR_F;
11153 if (insn & (1 << 9))
11154 imm = CPSR_A | CPSR_I | CPSR_F;
11155 }
11156 if (insn & (1 << 8)) {
11157 offset |= 0x1f;
11158 imm |= (insn & 0x1f);
11159 }
11160 if (offset) {
2fbac54b 11161 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
11162 }
11163 break;
11164 case 3: /* Special control operations. */
14120108 11165 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
8297cb13 11166 !arm_dc_feature(s, ARM_FEATURE_M)) {
14120108
JS
11167 goto illegal_op;
11168 }
9ee6e8bb
PB
11169 op = (insn >> 4) & 0xf;
11170 switch (op) {
11171 case 2: /* clrex */
426f5abc 11172 gen_clrex(s);
9ee6e8bb
PB
11173 break;
11174 case 4: /* dsb */
11175 case 5: /* dmb */
61e4c432 11176 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 11177 break;
6df99dec
SS
11178 case 6: /* isb */
11179 /* We need to break the TB after this insn
11180 * to execute self-modifying code correctly
11181 * and also to take any pending interrupts
11182 * immediately.
11183 */
0b609cc1 11184 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 11185 break;
9ee6e8bb
PB
11186 default:
11187 goto illegal_op;
11188 }
11189 break;
11190 case 4: /* bxj */
9d7c59c8
PM
11191 /* Trivial implementation equivalent to bx.
11192 * This instruction doesn't exist at all for M-profile.
11193 */
11194 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11195 goto illegal_op;
11196 }
d9ba4830
PB
11197 tmp = load_reg(s, rn);
11198 gen_bx(s, tmp);
9ee6e8bb
PB
11199 break;
11200 case 5: /* Exception return. */
b8b45b68
RV
11201 if (IS_USER(s)) {
11202 goto illegal_op;
11203 }
11204 if (rn != 14 || rd != 15) {
11205 goto illegal_op;
11206 }
55c544ed
PM
11207 if (s->current_el == 2) {
11208 /* ERET from Hyp uses ELR_Hyp, not LR */
11209 if (insn & 0xff) {
11210 goto illegal_op;
11211 }
11212 tmp = load_cpu_field(elr_el[2]);
11213 } else {
11214 tmp = load_reg(s, rn);
11215 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
11216 }
b8b45b68
RV
11217 gen_exception_return(s, tmp);
11218 break;
8bfd0550 11219 case 6: /* MRS */
43ac6574
PM
11220 if (extract32(insn, 5, 1) &&
11221 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
11222 /* MRS (banked) */
11223 int sysm = extract32(insn, 16, 4) |
11224 (extract32(insn, 4, 1) << 4);
11225
11226 gen_mrs_banked(s, 0, sysm, rd);
11227 break;
11228 }
11229
3d54026f
PM
11230 if (extract32(insn, 16, 4) != 0xf) {
11231 goto illegal_op;
11232 }
11233 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
11234 extract32(insn, 0, 8) != 0) {
11235 goto illegal_op;
11236 }
11237
8bfd0550 11238 /* mrs cpsr */
7d1b0095 11239 tmp = tcg_temp_new_i32();
b53d8923 11240 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
11241 addr = tcg_const_i32(insn & 0xff);
11242 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 11243 tcg_temp_free_i32(addr);
9ee6e8bb 11244 } else {
9ef39277 11245 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 11246 }
8984bd2e 11247 store_reg(s, rd, tmp);
9ee6e8bb 11248 break;
8bfd0550 11249 case 7: /* MRS */
43ac6574
PM
11250 if (extract32(insn, 5, 1) &&
11251 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
11252 /* MRS (banked) */
11253 int sysm = extract32(insn, 16, 4) |
11254 (extract32(insn, 4, 1) << 4);
11255
11256 gen_mrs_banked(s, 1, sysm, rd);
11257 break;
11258 }
11259
11260 /* mrs spsr. */
9ee6e8bb 11261 /* Not accessible in user mode. */
b53d8923 11262 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11263 goto illegal_op;
b53d8923 11264 }
3d54026f
PM
11265
11266 if (extract32(insn, 16, 4) != 0xf ||
11267 extract32(insn, 0, 8) != 0) {
11268 goto illegal_op;
11269 }
11270
d9ba4830
PB
11271 tmp = load_cpu_field(spsr);
11272 store_reg(s, rd, tmp);
9ee6e8bb 11273 break;
2c0262af
FB
11274 }
11275 }
9ee6e8bb
PB
11276 } else {
11277 /* Conditional branch. */
11278 op = (insn >> 22) & 0xf;
11279 /* Generate a conditional jump to next instruction. */
c2d9644e 11280 arm_skip_unless(s, op);
9ee6e8bb
PB
11281
11282 /* offset[11:1] = insn[10:0] */
11283 offset = (insn & 0x7ff) << 1;
11284 /* offset[17:12] = insn[21:16]. */
11285 offset |= (insn & 0x003f0000) >> 4;
11286 /* offset[31:20] = insn[26]. */
11287 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
11288 /* offset[18] = insn[13]. */
11289 offset |= (insn & (1 << 13)) << 5;
11290 /* offset[19] = insn[11]. */
11291 offset |= (insn & (1 << 11)) << 8;
11292
11293 /* jump to the offset */
b0109805 11294 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
11295 }
11296 } else {
55203189
PM
11297 /*
11298 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
11299 * - Data-processing (modified immediate, plain binary immediate)
11300 */
9ee6e8bb 11301 if (insn & (1 << 25)) {
55203189
PM
11302 /*
11303 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
11304 * - Data-processing (plain binary immediate)
11305 */
9ee6e8bb
PB
11306 if (insn & (1 << 24)) {
11307 if (insn & (1 << 20))
11308 goto illegal_op;
11309 /* Bitfield/Saturate. */
11310 op = (insn >> 21) & 7;
11311 imm = insn & 0x1f;
11312 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 11313 if (rn == 15) {
7d1b0095 11314 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
11315 tcg_gen_movi_i32(tmp, 0);
11316 } else {
11317 tmp = load_reg(s, rn);
11318 }
9ee6e8bb
PB
11319 switch (op) {
11320 case 2: /* Signed bitfield extract. */
11321 imm++;
11322 if (shift + imm > 32)
11323 goto illegal_op;
59a71b4c
RH
11324 if (imm < 32) {
11325 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
11326 }
9ee6e8bb
PB
11327 break;
11328 case 6: /* Unsigned bitfield extract. */
11329 imm++;
11330 if (shift + imm > 32)
11331 goto illegal_op;
59a71b4c
RH
11332 if (imm < 32) {
11333 tcg_gen_extract_i32(tmp, tmp, shift, imm);
11334 }
9ee6e8bb
PB
11335 break;
11336 case 3: /* Bitfield insert/clear. */
11337 if (imm < shift)
11338 goto illegal_op;
11339 imm = imm + 1 - shift;
11340 if (imm != 32) {
6ddbc6e4 11341 tmp2 = load_reg(s, rd);
d593c48e 11342 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 11343 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11344 }
11345 break;
11346 case 7:
11347 goto illegal_op;
11348 default: /* Saturate. */
9ee6e8bb
PB
11349 if (shift) {
11350 if (op & 1)
6ddbc6e4 11351 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 11352 else
6ddbc6e4 11353 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 11354 }
6ddbc6e4 11355 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
11356 if (op & 4) {
11357 /* Unsigned. */
62b44f05
AR
11358 if ((op & 1) && shift == 0) {
11359 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11360 tcg_temp_free_i32(tmp);
11361 tcg_temp_free_i32(tmp2);
11362 goto illegal_op;
11363 }
9ef39277 11364 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 11365 } else {
9ef39277 11366 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 11367 }
2c0262af 11368 } else {
9ee6e8bb 11369 /* Signed. */
62b44f05
AR
11370 if ((op & 1) && shift == 0) {
11371 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11372 tcg_temp_free_i32(tmp);
11373 tcg_temp_free_i32(tmp2);
11374 goto illegal_op;
11375 }
9ef39277 11376 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 11377 } else {
9ef39277 11378 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 11379 }
2c0262af 11380 }
b75263d6 11381 tcg_temp_free_i32(tmp2);
9ee6e8bb 11382 break;
2c0262af 11383 }
6ddbc6e4 11384 store_reg(s, rd, tmp);
9ee6e8bb
PB
11385 } else {
11386 imm = ((insn & 0x04000000) >> 15)
11387 | ((insn & 0x7000) >> 4) | (insn & 0xff);
11388 if (insn & (1 << 22)) {
11389 /* 16-bit immediate. */
11390 imm |= (insn >> 4) & 0xf000;
11391 if (insn & (1 << 23)) {
11392 /* movt */
5e3f878a 11393 tmp = load_reg(s, rd);
86831435 11394 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 11395 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 11396 } else {
9ee6e8bb 11397 /* movw */
7d1b0095 11398 tmp = tcg_temp_new_i32();
5e3f878a 11399 tcg_gen_movi_i32(tmp, imm);
2c0262af 11400 }
55203189 11401 store_reg(s, rd, tmp);
2c0262af 11402 } else {
9ee6e8bb
PB
11403 /* Add/sub 12-bit immediate. */
11404 if (rn == 15) {
b0109805 11405 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 11406 if (insn & (1 << 23))
b0109805 11407 offset -= imm;
9ee6e8bb 11408 else
b0109805 11409 offset += imm;
7d1b0095 11410 tmp = tcg_temp_new_i32();
5e3f878a 11411 tcg_gen_movi_i32(tmp, offset);
55203189 11412 store_reg(s, rd, tmp);
2c0262af 11413 } else {
5e3f878a 11414 tmp = load_reg(s, rn);
9ee6e8bb 11415 if (insn & (1 << 23))
5e3f878a 11416 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 11417 else
5e3f878a 11418 tcg_gen_addi_i32(tmp, tmp, imm);
55203189
PM
11419 if (rn == 13 && rd == 13) {
11420 /* ADD SP, SP, imm or SUB SP, SP, imm */
11421 store_sp_checked(s, tmp);
11422 } else {
11423 store_reg(s, rd, tmp);
11424 }
2c0262af 11425 }
9ee6e8bb 11426 }
191abaa2 11427 }
9ee6e8bb 11428 } else {
55203189
PM
11429 /*
11430 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
11431 * - Data-processing (modified immediate)
11432 */
9ee6e8bb
PB
11433 int shifter_out = 0;
11434 /* modified 12-bit immediate. */
11435 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
11436 imm = (insn & 0xff);
11437 switch (shift) {
11438 case 0: /* XY */
11439 /* Nothing to do. */
11440 break;
11441 case 1: /* 00XY00XY */
11442 imm |= imm << 16;
11443 break;
11444 case 2: /* XY00XY00 */
11445 imm |= imm << 16;
11446 imm <<= 8;
11447 break;
11448 case 3: /* XYXYXYXY */
11449 imm |= imm << 16;
11450 imm |= imm << 8;
11451 break;
11452 default: /* Rotated constant. */
11453 shift = (shift << 1) | (imm >> 7);
11454 imm |= 0x80;
11455 imm = imm << (32 - shift);
11456 shifter_out = 1;
11457 break;
b5ff1b31 11458 }
7d1b0095 11459 tmp2 = tcg_temp_new_i32();
3174f8e9 11460 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 11461 rn = (insn >> 16) & 0xf;
3174f8e9 11462 if (rn == 15) {
7d1b0095 11463 tmp = tcg_temp_new_i32();
3174f8e9
FN
11464 tcg_gen_movi_i32(tmp, 0);
11465 } else {
11466 tmp = load_reg(s, rn);
11467 }
9ee6e8bb
PB
11468 op = (insn >> 21) & 0xf;
11469 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 11470 shifter_out, tmp, tmp2))
9ee6e8bb 11471 goto illegal_op;
7d1b0095 11472 tcg_temp_free_i32(tmp2);
9ee6e8bb 11473 rd = (insn >> 8) & 0xf;
55203189
PM
11474 if (rd == 13 && rn == 13
11475 && (op == 8 || op == 13)) {
11476 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
11477 store_sp_checked(s, tmp);
11478 } else if (rd != 15) {
3174f8e9
FN
11479 store_reg(s, rd, tmp);
11480 } else {
7d1b0095 11481 tcg_temp_free_i32(tmp);
2c0262af 11482 }
2c0262af 11483 }
9ee6e8bb
PB
11484 }
11485 break;
11486 case 12: /* Load/store single data item. */
11487 {
11488 int postinc = 0;
11489 int writeback = 0;
a99caa48 11490 int memidx;
9bb6558a
PM
11491 ISSInfo issinfo;
11492
9ee6e8bb 11493 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 11494 if (disas_neon_ls_insn(s, insn)) {
c1713132 11495 goto illegal_op;
7dcc1f89 11496 }
9ee6e8bb
PB
11497 break;
11498 }
a2fdc890
PM
11499 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
11500 if (rs == 15) {
11501 if (!(insn & (1 << 20))) {
11502 goto illegal_op;
11503 }
11504 if (op != 2) {
11505 /* Byte or halfword load space with dest == r15 : memory hints.
11506 * Catch them early so we don't emit pointless addressing code.
11507 * This space is a mix of:
11508 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
11509 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
11510 * cores)
11511 * unallocated hints, which must be treated as NOPs
11512 * UNPREDICTABLE space, which we NOP or UNDEF depending on
11513 * which is easiest for the decoding logic
11514 * Some space which must UNDEF
11515 */
11516 int op1 = (insn >> 23) & 3;
11517 int op2 = (insn >> 6) & 0x3f;
11518 if (op & 2) {
11519 goto illegal_op;
11520 }
11521 if (rn == 15) {
02afbf64
PM
11522 /* UNPREDICTABLE, unallocated hint or
11523 * PLD/PLDW/PLI (literal)
11524 */
2eea841c 11525 return;
a2fdc890
PM
11526 }
11527 if (op1 & 1) {
2eea841c 11528 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11529 }
11530 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 11531 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11532 }
11533 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 11534 goto illegal_op;
a2fdc890
PM
11535 }
11536 }
a99caa48 11537 memidx = get_mem_index(s);
9ee6e8bb 11538 if (rn == 15) {
7d1b0095 11539 addr = tcg_temp_new_i32();
9ee6e8bb
PB
11540 /* PC relative. */
11541 /* s->pc has already been incremented by 4. */
11542 imm = s->pc & 0xfffffffc;
11543 if (insn & (1 << 23))
11544 imm += insn & 0xfff;
11545 else
11546 imm -= insn & 0xfff;
b0109805 11547 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 11548 } else {
b0109805 11549 addr = load_reg(s, rn);
9ee6e8bb
PB
11550 if (insn & (1 << 23)) {
11551 /* Positive offset. */
11552 imm = insn & 0xfff;
b0109805 11553 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 11554 } else {
9ee6e8bb 11555 imm = insn & 0xff;
2a0308c5
PM
11556 switch ((insn >> 8) & 0xf) {
11557 case 0x0: /* Shifted Register. */
9ee6e8bb 11558 shift = (insn >> 4) & 0xf;
2a0308c5
PM
11559 if (shift > 3) {
11560 tcg_temp_free_i32(addr);
18c9b560 11561 goto illegal_op;
2a0308c5 11562 }
b26eefb6 11563 tmp = load_reg(s, rm);
9ee6e8bb 11564 if (shift)
b26eefb6 11565 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 11566 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11567 tcg_temp_free_i32(tmp);
9ee6e8bb 11568 break;
2a0308c5 11569 case 0xc: /* Negative offset. */
b0109805 11570 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 11571 break;
2a0308c5 11572 case 0xe: /* User privilege. */
b0109805 11573 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 11574 memidx = get_a32_user_mem_index(s);
9ee6e8bb 11575 break;
2a0308c5 11576 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
11577 imm = -imm;
11578 /* Fall through. */
2a0308c5 11579 case 0xb: /* Post-increment. */
9ee6e8bb
PB
11580 postinc = 1;
11581 writeback = 1;
11582 break;
2a0308c5 11583 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
11584 imm = -imm;
11585 /* Fall through. */
2a0308c5 11586 case 0xf: /* Pre-increment. */
b0109805 11587 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
11588 writeback = 1;
11589 break;
11590 default:
2a0308c5 11591 tcg_temp_free_i32(addr);
b7bcbe95 11592 goto illegal_op;
9ee6e8bb
PB
11593 }
11594 }
11595 }
9bb6558a
PM
11596
11597 issinfo = writeback ? ISSInvalid : rs;
11598
9ee6e8bb
PB
11599 if (insn & (1 << 20)) {
11600 /* Load. */
5a839c0d 11601 tmp = tcg_temp_new_i32();
a2fdc890 11602 switch (op) {
5a839c0d 11603 case 0:
9bb6558a 11604 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11605 break;
11606 case 4:
9bb6558a 11607 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11608 break;
11609 case 1:
9bb6558a 11610 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11611 break;
11612 case 5:
9bb6558a 11613 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11614 break;
11615 case 2:
9bb6558a 11616 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11617 break;
2a0308c5 11618 default:
5a839c0d 11619 tcg_temp_free_i32(tmp);
2a0308c5
PM
11620 tcg_temp_free_i32(addr);
11621 goto illegal_op;
a2fdc890
PM
11622 }
11623 if (rs == 15) {
3bb8a96f 11624 gen_bx_excret(s, tmp);
9ee6e8bb 11625 } else {
a2fdc890 11626 store_reg(s, rs, tmp);
9ee6e8bb
PB
11627 }
11628 } else {
11629 /* Store. */
b0109805 11630 tmp = load_reg(s, rs);
9ee6e8bb 11631 switch (op) {
5a839c0d 11632 case 0:
9bb6558a 11633 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11634 break;
11635 case 1:
9bb6558a 11636 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11637 break;
11638 case 2:
9bb6558a 11639 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11640 break;
2a0308c5 11641 default:
5a839c0d 11642 tcg_temp_free_i32(tmp);
2a0308c5
PM
11643 tcg_temp_free_i32(addr);
11644 goto illegal_op;
b7bcbe95 11645 }
5a839c0d 11646 tcg_temp_free_i32(tmp);
2c0262af 11647 }
9ee6e8bb 11648 if (postinc)
b0109805
PB
11649 tcg_gen_addi_i32(addr, addr, imm);
11650 if (writeback) {
11651 store_reg(s, rn, addr);
11652 } else {
7d1b0095 11653 tcg_temp_free_i32(addr);
b0109805 11654 }
9ee6e8bb
PB
11655 }
11656 break;
11657 default:
11658 goto illegal_op;
2c0262af 11659 }
2eea841c 11660 return;
9ee6e8bb 11661illegal_op:
2eea841c
PM
11662 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11663 default_exception_el(s));
2c0262af
FB
11664}
11665
296e5a0a 11666static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 11667{
296e5a0a 11668 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
11669 int32_t offset;
11670 int i;
39d5492a
PM
11671 TCGv_i32 tmp;
11672 TCGv_i32 tmp2;
11673 TCGv_i32 addr;
99c475ab 11674
99c475ab
FB
11675 switch (insn >> 12) {
11676 case 0: case 1:
396e467c 11677
99c475ab
FB
11678 rd = insn & 7;
11679 op = (insn >> 11) & 3;
11680 if (op == 3) {
a2d12f0f
PM
11681 /*
11682 * 0b0001_1xxx_xxxx_xxxx
11683 * - Add, subtract (three low registers)
11684 * - Add, subtract (two low registers and immediate)
11685 */
99c475ab 11686 rn = (insn >> 3) & 7;
396e467c 11687 tmp = load_reg(s, rn);
99c475ab
FB
11688 if (insn & (1 << 10)) {
11689 /* immediate */
7d1b0095 11690 tmp2 = tcg_temp_new_i32();
396e467c 11691 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11692 } else {
11693 /* reg */
11694 rm = (insn >> 6) & 7;
396e467c 11695 tmp2 = load_reg(s, rm);
99c475ab 11696 }
9ee6e8bb
PB
11697 if (insn & (1 << 9)) {
11698 if (s->condexec_mask)
396e467c 11699 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11700 else
72485ec4 11701 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11702 } else {
11703 if (s->condexec_mask)
396e467c 11704 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11705 else
72485ec4 11706 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11707 }
7d1b0095 11708 tcg_temp_free_i32(tmp2);
396e467c 11709 store_reg(s, rd, tmp);
99c475ab
FB
11710 } else {
11711 /* shift immediate */
11712 rm = (insn >> 3) & 7;
11713 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11714 tmp = load_reg(s, rm);
11715 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11716 if (!s->condexec_mask)
11717 gen_logic_CC(tmp);
11718 store_reg(s, rd, tmp);
99c475ab
FB
11719 }
11720 break;
11721 case 2: case 3:
a2d12f0f
PM
11722 /*
11723 * 0b001x_xxxx_xxxx_xxxx
11724 * - Add, subtract, compare, move (one low register and immediate)
11725 */
99c475ab
FB
11726 op = (insn >> 11) & 3;
11727 rd = (insn >> 8) & 0x7;
396e467c 11728 if (op == 0) { /* mov */
7d1b0095 11729 tmp = tcg_temp_new_i32();
396e467c 11730 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11731 if (!s->condexec_mask)
396e467c
FN
11732 gen_logic_CC(tmp);
11733 store_reg(s, rd, tmp);
11734 } else {
11735 tmp = load_reg(s, rd);
7d1b0095 11736 tmp2 = tcg_temp_new_i32();
396e467c
FN
11737 tcg_gen_movi_i32(tmp2, insn & 0xff);
11738 switch (op) {
11739 case 1: /* cmp */
72485ec4 11740 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11741 tcg_temp_free_i32(tmp);
11742 tcg_temp_free_i32(tmp2);
396e467c
FN
11743 break;
11744 case 2: /* add */
11745 if (s->condexec_mask)
11746 tcg_gen_add_i32(tmp, tmp, tmp2);
11747 else
72485ec4 11748 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11749 tcg_temp_free_i32(tmp2);
396e467c
FN
11750 store_reg(s, rd, tmp);
11751 break;
11752 case 3: /* sub */
11753 if (s->condexec_mask)
11754 tcg_gen_sub_i32(tmp, tmp, tmp2);
11755 else
72485ec4 11756 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11757 tcg_temp_free_i32(tmp2);
396e467c
FN
11758 store_reg(s, rd, tmp);
11759 break;
11760 }
99c475ab 11761 }
99c475ab
FB
11762 break;
11763 case 4:
11764 if (insn & (1 << 11)) {
11765 rd = (insn >> 8) & 7;
5899f386
FB
11766 /* load pc-relative. Bit 1 of PC is ignored. */
11767 val = s->pc + 2 + ((insn & 0xff) * 4);
11768 val &= ~(uint32_t)2;
7d1b0095 11769 addr = tcg_temp_new_i32();
b0109805 11770 tcg_gen_movi_i32(addr, val);
c40c8556 11771 tmp = tcg_temp_new_i32();
9bb6558a
PM
11772 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11773 rd | ISSIs16Bit);
7d1b0095 11774 tcg_temp_free_i32(addr);
b0109805 11775 store_reg(s, rd, tmp);
99c475ab
FB
11776 break;
11777 }
11778 if (insn & (1 << 10)) {
ebfe27c5
PM
11779 /* 0b0100_01xx_xxxx_xxxx
11780 * - data processing extended, branch and exchange
11781 */
99c475ab
FB
11782 rd = (insn & 7) | ((insn >> 4) & 8);
11783 rm = (insn >> 3) & 0xf;
11784 op = (insn >> 8) & 3;
11785 switch (op) {
11786 case 0: /* add */
396e467c
FN
11787 tmp = load_reg(s, rd);
11788 tmp2 = load_reg(s, rm);
11789 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11790 tcg_temp_free_i32(tmp2);
55203189
PM
11791 if (rd == 13) {
11792 /* ADD SP, SP, reg */
11793 store_sp_checked(s, tmp);
11794 } else {
11795 store_reg(s, rd, tmp);
11796 }
99c475ab
FB
11797 break;
11798 case 1: /* cmp */
396e467c
FN
11799 tmp = load_reg(s, rd);
11800 tmp2 = load_reg(s, rm);
72485ec4 11801 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11802 tcg_temp_free_i32(tmp2);
11803 tcg_temp_free_i32(tmp);
99c475ab
FB
11804 break;
11805 case 2: /* mov/cpy */
396e467c 11806 tmp = load_reg(s, rm);
55203189
PM
11807 if (rd == 13) {
11808 /* MOV SP, reg */
11809 store_sp_checked(s, tmp);
11810 } else {
11811 store_reg(s, rd, tmp);
11812 }
99c475ab 11813 break;
ebfe27c5
PM
11814 case 3:
11815 {
11816 /* 0b0100_0111_xxxx_xxxx
11817 * - branch [and link] exchange thumb register
11818 */
11819 bool link = insn & (1 << 7);
11820
fb602cb7 11821 if (insn & 3) {
ebfe27c5
PM
11822 goto undef;
11823 }
11824 if (link) {
be5e7a76 11825 ARCH(5);
ebfe27c5 11826 }
fb602cb7
PM
11827 if ((insn & 4)) {
11828 /* BXNS/BLXNS: only exists for v8M with the
11829 * security extensions, and always UNDEF if NonSecure.
11830 * We don't implement these in the user-only mode
11831 * either (in theory you can use them from Secure User
11832 * mode but they are too tied in to system emulation.)
11833 */
11834 if (!s->v8m_secure || IS_USER_ONLY) {
11835 goto undef;
11836 }
11837 if (link) {
3e3fa230 11838 gen_blxns(s, rm);
fb602cb7
PM
11839 } else {
11840 gen_bxns(s, rm);
11841 }
11842 break;
11843 }
11844 /* BLX/BX */
ebfe27c5
PM
11845 tmp = load_reg(s, rm);
11846 if (link) {
99c475ab 11847 val = (uint32_t)s->pc | 1;
7d1b0095 11848 tmp2 = tcg_temp_new_i32();
b0109805
PB
11849 tcg_gen_movi_i32(tmp2, val);
11850 store_reg(s, 14, tmp2);
3bb8a96f
PM
11851 gen_bx(s, tmp);
11852 } else {
11853 /* Only BX works as exception-return, not BLX */
11854 gen_bx_excret(s, tmp);
99c475ab 11855 }
99c475ab
FB
11856 break;
11857 }
ebfe27c5 11858 }
99c475ab
FB
11859 break;
11860 }
11861
a2d12f0f
PM
11862 /*
11863 * 0b0100_00xx_xxxx_xxxx
11864 * - Data-processing (two low registers)
11865 */
99c475ab
FB
11866 rd = insn & 7;
11867 rm = (insn >> 3) & 7;
11868 op = (insn >> 6) & 0xf;
11869 if (op == 2 || op == 3 || op == 4 || op == 7) {
11870 /* the shift/rotate ops want the operands backwards */
11871 val = rm;
11872 rm = rd;
11873 rd = val;
11874 val = 1;
11875 } else {
11876 val = 0;
11877 }
11878
396e467c 11879 if (op == 9) { /* neg */
7d1b0095 11880 tmp = tcg_temp_new_i32();
396e467c
FN
11881 tcg_gen_movi_i32(tmp, 0);
11882 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11883 tmp = load_reg(s, rd);
11884 } else {
f764718d 11885 tmp = NULL;
396e467c 11886 }
99c475ab 11887
396e467c 11888 tmp2 = load_reg(s, rm);
5899f386 11889 switch (op) {
99c475ab 11890 case 0x0: /* and */
396e467c 11891 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11892 if (!s->condexec_mask)
396e467c 11893 gen_logic_CC(tmp);
99c475ab
FB
11894 break;
11895 case 0x1: /* eor */
396e467c 11896 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11897 if (!s->condexec_mask)
396e467c 11898 gen_logic_CC(tmp);
99c475ab
FB
11899 break;
11900 case 0x2: /* lsl */
9ee6e8bb 11901 if (s->condexec_mask) {
365af80e 11902 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11903 } else {
9ef39277 11904 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11905 gen_logic_CC(tmp2);
9ee6e8bb 11906 }
99c475ab
FB
11907 break;
11908 case 0x3: /* lsr */
9ee6e8bb 11909 if (s->condexec_mask) {
365af80e 11910 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11911 } else {
9ef39277 11912 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11913 gen_logic_CC(tmp2);
9ee6e8bb 11914 }
99c475ab
FB
11915 break;
11916 case 0x4: /* asr */
9ee6e8bb 11917 if (s->condexec_mask) {
365af80e 11918 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11919 } else {
9ef39277 11920 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11921 gen_logic_CC(tmp2);
9ee6e8bb 11922 }
99c475ab
FB
11923 break;
11924 case 0x5: /* adc */
49b4c31e 11925 if (s->condexec_mask) {
396e467c 11926 gen_adc(tmp, tmp2);
49b4c31e
RH
11927 } else {
11928 gen_adc_CC(tmp, tmp, tmp2);
11929 }
99c475ab
FB
11930 break;
11931 case 0x6: /* sbc */
2de68a49 11932 if (s->condexec_mask) {
396e467c 11933 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11934 } else {
11935 gen_sbc_CC(tmp, tmp, tmp2);
11936 }
99c475ab
FB
11937 break;
11938 case 0x7: /* ror */
9ee6e8bb 11939 if (s->condexec_mask) {
f669df27
AJ
11940 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11941 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11942 } else {
9ef39277 11943 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11944 gen_logic_CC(tmp2);
9ee6e8bb 11945 }
99c475ab
FB
11946 break;
11947 case 0x8: /* tst */
396e467c
FN
11948 tcg_gen_and_i32(tmp, tmp, tmp2);
11949 gen_logic_CC(tmp);
99c475ab 11950 rd = 16;
5899f386 11951 break;
99c475ab 11952 case 0x9: /* neg */
9ee6e8bb 11953 if (s->condexec_mask)
396e467c 11954 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11955 else
72485ec4 11956 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11957 break;
11958 case 0xa: /* cmp */
72485ec4 11959 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11960 rd = 16;
11961 break;
11962 case 0xb: /* cmn */
72485ec4 11963 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11964 rd = 16;
11965 break;
11966 case 0xc: /* orr */
396e467c 11967 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11968 if (!s->condexec_mask)
396e467c 11969 gen_logic_CC(tmp);
99c475ab
FB
11970 break;
11971 case 0xd: /* mul */
7b2919a0 11972 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11973 if (!s->condexec_mask)
396e467c 11974 gen_logic_CC(tmp);
99c475ab
FB
11975 break;
11976 case 0xe: /* bic */
f669df27 11977 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11978 if (!s->condexec_mask)
396e467c 11979 gen_logic_CC(tmp);
99c475ab
FB
11980 break;
11981 case 0xf: /* mvn */
396e467c 11982 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11983 if (!s->condexec_mask)
396e467c 11984 gen_logic_CC(tmp2);
99c475ab 11985 val = 1;
5899f386 11986 rm = rd;
99c475ab
FB
11987 break;
11988 }
11989 if (rd != 16) {
396e467c
FN
11990 if (val) {
11991 store_reg(s, rm, tmp2);
11992 if (op != 0xf)
7d1b0095 11993 tcg_temp_free_i32(tmp);
396e467c
FN
11994 } else {
11995 store_reg(s, rd, tmp);
7d1b0095 11996 tcg_temp_free_i32(tmp2);
396e467c
FN
11997 }
11998 } else {
7d1b0095
PM
11999 tcg_temp_free_i32(tmp);
12000 tcg_temp_free_i32(tmp2);
99c475ab
FB
12001 }
12002 break;
12003
12004 case 5:
12005 /* load/store register offset. */
12006 rd = insn & 7;
12007 rn = (insn >> 3) & 7;
12008 rm = (insn >> 6) & 7;
12009 op = (insn >> 9) & 7;
b0109805 12010 addr = load_reg(s, rn);
b26eefb6 12011 tmp = load_reg(s, rm);
b0109805 12012 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 12013 tcg_temp_free_i32(tmp);
99c475ab 12014
c40c8556 12015 if (op < 3) { /* store */
b0109805 12016 tmp = load_reg(s, rd);
c40c8556
PM
12017 } else {
12018 tmp = tcg_temp_new_i32();
12019 }
99c475ab
FB
12020
12021 switch (op) {
12022 case 0: /* str */
9bb6558a 12023 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12024 break;
12025 case 1: /* strh */
9bb6558a 12026 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12027 break;
12028 case 2: /* strb */
9bb6558a 12029 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12030 break;
12031 case 3: /* ldrsb */
9bb6558a 12032 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12033 break;
12034 case 4: /* ldr */
9bb6558a 12035 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12036 break;
12037 case 5: /* ldrh */
9bb6558a 12038 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12039 break;
12040 case 6: /* ldrb */
9bb6558a 12041 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12042 break;
12043 case 7: /* ldrsh */
9bb6558a 12044 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12045 break;
12046 }
c40c8556 12047 if (op >= 3) { /* load */
b0109805 12048 store_reg(s, rd, tmp);
c40c8556
PM
12049 } else {
12050 tcg_temp_free_i32(tmp);
12051 }
7d1b0095 12052 tcg_temp_free_i32(addr);
99c475ab
FB
12053 break;
12054
12055 case 6:
12056 /* load/store word immediate offset */
12057 rd = insn & 7;
12058 rn = (insn >> 3) & 7;
b0109805 12059 addr = load_reg(s, rn);
99c475ab 12060 val = (insn >> 4) & 0x7c;
b0109805 12061 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12062
12063 if (insn & (1 << 11)) {
12064 /* load */
c40c8556 12065 tmp = tcg_temp_new_i32();
12dcc321 12066 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 12067 store_reg(s, rd, tmp);
99c475ab
FB
12068 } else {
12069 /* store */
b0109805 12070 tmp = load_reg(s, rd);
12dcc321 12071 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12072 tcg_temp_free_i32(tmp);
99c475ab 12073 }
7d1b0095 12074 tcg_temp_free_i32(addr);
99c475ab
FB
12075 break;
12076
12077 case 7:
12078 /* load/store byte immediate offset */
12079 rd = insn & 7;
12080 rn = (insn >> 3) & 7;
b0109805 12081 addr = load_reg(s, rn);
99c475ab 12082 val = (insn >> 6) & 0x1f;
b0109805 12083 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12084
12085 if (insn & (1 << 11)) {
12086 /* load */
c40c8556 12087 tmp = tcg_temp_new_i32();
9bb6558a 12088 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12089 store_reg(s, rd, tmp);
99c475ab
FB
12090 } else {
12091 /* store */
b0109805 12092 tmp = load_reg(s, rd);
9bb6558a 12093 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12094 tcg_temp_free_i32(tmp);
99c475ab 12095 }
7d1b0095 12096 tcg_temp_free_i32(addr);
99c475ab
FB
12097 break;
12098
12099 case 8:
12100 /* load/store halfword immediate offset */
12101 rd = insn & 7;
12102 rn = (insn >> 3) & 7;
b0109805 12103 addr = load_reg(s, rn);
99c475ab 12104 val = (insn >> 5) & 0x3e;
b0109805 12105 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12106
12107 if (insn & (1 << 11)) {
12108 /* load */
c40c8556 12109 tmp = tcg_temp_new_i32();
9bb6558a 12110 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12111 store_reg(s, rd, tmp);
99c475ab
FB
12112 } else {
12113 /* store */
b0109805 12114 tmp = load_reg(s, rd);
9bb6558a 12115 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12116 tcg_temp_free_i32(tmp);
99c475ab 12117 }
7d1b0095 12118 tcg_temp_free_i32(addr);
99c475ab
FB
12119 break;
12120
12121 case 9:
12122 /* load/store from stack */
12123 rd = (insn >> 8) & 7;
b0109805 12124 addr = load_reg(s, 13);
99c475ab 12125 val = (insn & 0xff) * 4;
b0109805 12126 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12127
12128 if (insn & (1 << 11)) {
12129 /* load */
c40c8556 12130 tmp = tcg_temp_new_i32();
9bb6558a 12131 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12132 store_reg(s, rd, tmp);
99c475ab
FB
12133 } else {
12134 /* store */
b0109805 12135 tmp = load_reg(s, rd);
9bb6558a 12136 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12137 tcg_temp_free_i32(tmp);
99c475ab 12138 }
7d1b0095 12139 tcg_temp_free_i32(addr);
99c475ab
FB
12140 break;
12141
12142 case 10:
55203189
PM
12143 /*
12144 * 0b1010_xxxx_xxxx_xxxx
12145 * - Add PC/SP (immediate)
12146 */
99c475ab 12147 rd = (insn >> 8) & 7;
5899f386
FB
12148 if (insn & (1 << 11)) {
12149 /* SP */
5e3f878a 12150 tmp = load_reg(s, 13);
5899f386
FB
12151 } else {
12152 /* PC. bit 1 is ignored. */
7d1b0095 12153 tmp = tcg_temp_new_i32();
5e3f878a 12154 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 12155 }
99c475ab 12156 val = (insn & 0xff) * 4;
5e3f878a
PB
12157 tcg_gen_addi_i32(tmp, tmp, val);
12158 store_reg(s, rd, tmp);
99c475ab
FB
12159 break;
12160
12161 case 11:
12162 /* misc */
12163 op = (insn >> 8) & 0xf;
12164 switch (op) {
12165 case 0:
55203189
PM
12166 /*
12167 * 0b1011_0000_xxxx_xxxx
12168 * - ADD (SP plus immediate)
12169 * - SUB (SP minus immediate)
12170 */
b26eefb6 12171 tmp = load_reg(s, 13);
99c475ab
FB
12172 val = (insn & 0x7f) * 4;
12173 if (insn & (1 << 7))
6a0d8a1d 12174 val = -(int32_t)val;
b26eefb6 12175 tcg_gen_addi_i32(tmp, tmp, val);
55203189 12176 store_sp_checked(s, tmp);
99c475ab
FB
12177 break;
12178
9ee6e8bb
PB
12179 case 2: /* sign/zero extend. */
12180 ARCH(6);
12181 rd = insn & 7;
12182 rm = (insn >> 3) & 7;
b0109805 12183 tmp = load_reg(s, rm);
9ee6e8bb 12184 switch ((insn >> 6) & 3) {
b0109805
PB
12185 case 0: gen_sxth(tmp); break;
12186 case 1: gen_sxtb(tmp); break;
12187 case 2: gen_uxth(tmp); break;
12188 case 3: gen_uxtb(tmp); break;
9ee6e8bb 12189 }
b0109805 12190 store_reg(s, rd, tmp);
9ee6e8bb 12191 break;
99c475ab
FB
12192 case 4: case 5: case 0xc: case 0xd:
12193 /* push/pop */
b0109805 12194 addr = load_reg(s, 13);
5899f386
FB
12195 if (insn & (1 << 8))
12196 offset = 4;
99c475ab 12197 else
5899f386
FB
12198 offset = 0;
12199 for (i = 0; i < 8; i++) {
12200 if (insn & (1 << i))
12201 offset += 4;
12202 }
12203 if ((insn & (1 << 11)) == 0) {
b0109805 12204 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 12205 }
99c475ab
FB
12206 for (i = 0; i < 8; i++) {
12207 if (insn & (1 << i)) {
12208 if (insn & (1 << 11)) {
12209 /* pop */
c40c8556 12210 tmp = tcg_temp_new_i32();
12dcc321 12211 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 12212 store_reg(s, i, tmp);
99c475ab
FB
12213 } else {
12214 /* push */
b0109805 12215 tmp = load_reg(s, i);
12dcc321 12216 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12217 tcg_temp_free_i32(tmp);
99c475ab 12218 }
5899f386 12219 /* advance to the next address. */
b0109805 12220 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
12221 }
12222 }
f764718d 12223 tmp = NULL;
99c475ab
FB
12224 if (insn & (1 << 8)) {
12225 if (insn & (1 << 11)) {
12226 /* pop pc */
c40c8556 12227 tmp = tcg_temp_new_i32();
12dcc321 12228 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
12229 /* don't set the pc until the rest of the instruction
12230 has completed */
12231 } else {
12232 /* push lr */
b0109805 12233 tmp = load_reg(s, 14);
12dcc321 12234 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12235 tcg_temp_free_i32(tmp);
99c475ab 12236 }
b0109805 12237 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 12238 }
5899f386 12239 if ((insn & (1 << 11)) == 0) {
b0109805 12240 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 12241 }
99c475ab 12242 /* write back the new stack pointer */
b0109805 12243 store_reg(s, 13, addr);
99c475ab 12244 /* set the new PC value */
be5e7a76 12245 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 12246 store_reg_from_load(s, 15, tmp);
be5e7a76 12247 }
99c475ab
FB
12248 break;
12249
9ee6e8bb
PB
12250 case 1: case 3: case 9: case 11: /* czb */
12251 rm = insn & 7;
d9ba4830 12252 tmp = load_reg(s, rm);
c2d9644e 12253 arm_gen_condlabel(s);
9ee6e8bb 12254 if (insn & (1 << 11))
cb63669a 12255 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 12256 else
cb63669a 12257 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 12258 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
12259 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
12260 val = (uint32_t)s->pc + 2;
12261 val += offset;
12262 gen_jmp(s, val);
12263 break;
12264
12265 case 15: /* IT, nop-hint. */
12266 if ((insn & 0xf) == 0) {
12267 gen_nop_hint(s, (insn >> 4) & 0xf);
12268 break;
12269 }
12270 /* If Then. */
12271 s->condexec_cond = (insn >> 4) & 0xe;
12272 s->condexec_mask = insn & 0x1f;
12273 /* No actual code generated for this insn, just setup state. */
12274 break;
12275
06c949e6 12276 case 0xe: /* bkpt */
d4a2dc67
PM
12277 {
12278 int imm8 = extract32(insn, 0, 8);
be5e7a76 12279 ARCH(5);
c900a2e6 12280 gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
06c949e6 12281 break;
d4a2dc67 12282 }
06c949e6 12283
19a6e31c
PM
12284 case 0xa: /* rev, and hlt */
12285 {
12286 int op1 = extract32(insn, 6, 2);
12287
12288 if (op1 == 2) {
12289 /* HLT */
12290 int imm6 = extract32(insn, 0, 6);
12291
12292 gen_hlt(s, imm6);
12293 break;
12294 }
12295
12296 /* Otherwise this is rev */
9ee6e8bb
PB
12297 ARCH(6);
12298 rn = (insn >> 3) & 0x7;
12299 rd = insn & 0x7;
b0109805 12300 tmp = load_reg(s, rn);
19a6e31c 12301 switch (op1) {
66896cb8 12302 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
12303 case 1: gen_rev16(tmp); break;
12304 case 3: gen_revsh(tmp); break;
19a6e31c
PM
12305 default:
12306 g_assert_not_reached();
9ee6e8bb 12307 }
b0109805 12308 store_reg(s, rd, tmp);
9ee6e8bb 12309 break;
19a6e31c 12310 }
9ee6e8bb 12311
d9e028c1
PM
12312 case 6:
12313 switch ((insn >> 5) & 7) {
12314 case 2:
12315 /* setend */
12316 ARCH(6);
9886ecdf
PB
12317 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
12318 gen_helper_setend(cpu_env);
dcba3a8d 12319 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 12320 }
9ee6e8bb 12321 break;
d9e028c1
PM
12322 case 3:
12323 /* cps */
12324 ARCH(6);
12325 if (IS_USER(s)) {
12326 break;
8984bd2e 12327 }
b53d8923 12328 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
12329 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
12330 /* FAULTMASK */
12331 if (insn & 1) {
12332 addr = tcg_const_i32(19);
12333 gen_helper_v7m_msr(cpu_env, addr, tmp);
12334 tcg_temp_free_i32(addr);
12335 }
12336 /* PRIMASK */
12337 if (insn & 2) {
12338 addr = tcg_const_i32(16);
12339 gen_helper_v7m_msr(cpu_env, addr, tmp);
12340 tcg_temp_free_i32(addr);
12341 }
12342 tcg_temp_free_i32(tmp);
12343 gen_lookup_tb(s);
12344 } else {
12345 if (insn & (1 << 4)) {
12346 shift = CPSR_A | CPSR_I | CPSR_F;
12347 } else {
12348 shift = 0;
12349 }
12350 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 12351 }
d9e028c1
PM
12352 break;
12353 default:
12354 goto undef;
9ee6e8bb
PB
12355 }
12356 break;
12357
99c475ab
FB
12358 default:
12359 goto undef;
12360 }
12361 break;
12362
12363 case 12:
a7d3970d 12364 {
99c475ab 12365 /* load/store multiple */
f764718d 12366 TCGv_i32 loaded_var = NULL;
99c475ab 12367 rn = (insn >> 8) & 0x7;
b0109805 12368 addr = load_reg(s, rn);
99c475ab
FB
12369 for (i = 0; i < 8; i++) {
12370 if (insn & (1 << i)) {
99c475ab
FB
12371 if (insn & (1 << 11)) {
12372 /* load */
c40c8556 12373 tmp = tcg_temp_new_i32();
12dcc321 12374 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
12375 if (i == rn) {
12376 loaded_var = tmp;
12377 } else {
12378 store_reg(s, i, tmp);
12379 }
99c475ab
FB
12380 } else {
12381 /* store */
b0109805 12382 tmp = load_reg(s, i);
12dcc321 12383 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12384 tcg_temp_free_i32(tmp);
99c475ab 12385 }
5899f386 12386 /* advance to the next address */
b0109805 12387 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
12388 }
12389 }
b0109805 12390 if ((insn & (1 << rn)) == 0) {
a7d3970d 12391 /* base reg not in list: base register writeback */
b0109805
PB
12392 store_reg(s, rn, addr);
12393 } else {
a7d3970d
PM
12394 /* base reg in list: if load, complete it now */
12395 if (insn & (1 << 11)) {
12396 store_reg(s, rn, loaded_var);
12397 }
7d1b0095 12398 tcg_temp_free_i32(addr);
b0109805 12399 }
99c475ab 12400 break;
a7d3970d 12401 }
99c475ab
FB
12402 case 13:
12403 /* conditional branch or swi */
12404 cond = (insn >> 8) & 0xf;
12405 if (cond == 0xe)
12406 goto undef;
12407
12408 if (cond == 0xf) {
12409 /* swi */
eaed129d 12410 gen_set_pc_im(s, s->pc);
d4a2dc67 12411 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 12412 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
12413 break;
12414 }
12415 /* generate a conditional jump to next instruction */
c2d9644e 12416 arm_skip_unless(s, cond);
99c475ab
FB
12417
12418 /* jump to the offset */
5899f386 12419 val = (uint32_t)s->pc + 2;
99c475ab 12420 offset = ((int32_t)insn << 24) >> 24;
5899f386 12421 val += offset << 1;
8aaca4c0 12422 gen_jmp(s, val);
99c475ab
FB
12423 break;
12424
12425 case 14:
358bf29e 12426 if (insn & (1 << 11)) {
296e5a0a
PM
12427 /* thumb_insn_is_16bit() ensures we can't get here for
12428 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
12429 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
12430 */
12431 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12432 ARCH(5);
12433 offset = ((insn & 0x7ff) << 1);
12434 tmp = load_reg(s, 14);
12435 tcg_gen_addi_i32(tmp, tmp, offset);
12436 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
12437
12438 tmp2 = tcg_temp_new_i32();
12439 tcg_gen_movi_i32(tmp2, s->pc | 1);
12440 store_reg(s, 14, tmp2);
12441 gen_bx(s, tmp);
358bf29e
PB
12442 break;
12443 }
9ee6e8bb 12444 /* unconditional branch */
99c475ab
FB
12445 val = (uint32_t)s->pc;
12446 offset = ((int32_t)insn << 21) >> 21;
12447 val += (offset << 1) + 2;
8aaca4c0 12448 gen_jmp(s, val);
99c475ab
FB
12449 break;
12450
12451 case 15:
296e5a0a
PM
12452 /* thumb_insn_is_16bit() ensures we can't get here for
12453 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
12454 */
12455 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12456
12457 if (insn & (1 << 11)) {
12458 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
12459 offset = ((insn & 0x7ff) << 1) | 1;
12460 tmp = load_reg(s, 14);
12461 tcg_gen_addi_i32(tmp, tmp, offset);
12462
12463 tmp2 = tcg_temp_new_i32();
12464 tcg_gen_movi_i32(tmp2, s->pc | 1);
12465 store_reg(s, 14, tmp2);
12466 gen_bx(s, tmp);
12467 } else {
12468 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
12469 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
12470
12471 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
12472 }
9ee6e8bb 12473 break;
99c475ab
FB
12474 }
12475 return;
9ee6e8bb 12476illegal_op:
99c475ab 12477undef:
73710361
GB
12478 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
12479 default_exception_el(s));
99c475ab
FB
12480}
12481
541ebcd4
PM
12482static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
12483{
12484 /* Return true if the insn at dc->pc might cross a page boundary.
12485 * (False positives are OK, false negatives are not.)
5b8d7289
PM
12486 * We know this is a Thumb insn, and our caller ensures we are
12487 * only called if dc->pc is less than 4 bytes from the page
12488 * boundary, so we cross the page if the first 16 bits indicate
12489 * that this is a 32 bit insn.
541ebcd4 12490 */
5b8d7289 12491 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4 12492
5b8d7289 12493 return !thumb_insn_is_16bit(s, insn);
541ebcd4
PM
12494}
12495
/*
 * TranslatorOps hook: populate the DisasContext from the CPU state and
 * the TB flags before translation of a new translation block begins.
 */
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);

    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    /* Decode the per-TB flags that were baked in at code-gen lookup time. */
    dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
    dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->v8m_stackcheck = ARM_TBFLAG_STACKCHECK(dc->base.tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1.  */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }

    /* Allocate the TCG temporaries shared by the VFP/Neon/iwMMXt decoders. */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
}
12577
/*
 * TranslatorOps hook: emit any code needed at the very start of a TB.
 * For ARM this clears the in-CPU condexec (IT) bits, per the note below.
 */
static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    tcg_clear_temp_count();
}
12621
/*
 * TranslatorOps hook: called before each insn is translated.  Records the
 * PC and the packed condexec bits so restore_state_to_opc() can rebuild
 * them after an exception; also remembers the insn_start op so later code
 * can patch its operands.
 */
static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}
12631
/*
 * TranslatorOps hook: a breakpoint was found at the current PC.
 * CPU-internal (BP_CPU) breakpoints emit a runtime check helper and end
 * the TB early; guest-debug breakpoints raise EXCP_DEBUG immediately.
 * Returns true to tell the translator loop the insn was consumed.
 */
static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           to for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing.  */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->pc += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
12658
/*
 * Common per-insn preamble shared by the ARM and Thumb translate hooks.
 * Returns true if the insn must NOT be translated (the TB has already
 * been terminated here), false to proceed with normal translation.
 */
static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}
13189a90 12692
d0264d86 12693static void arm_post_translate_insn(DisasContext *dc)
722ef0a5 12694{
13189a90
LV
12695 if (dc->condjmp && !dc->base.is_jmp) {
12696 gen_set_label(dc->condlabel);
12697 dc->condjmp = 0;
12698 }
13189a90 12699 dc->base.pc_next = dc->pc;
23169224 12700 translator_loop_temp_check(&dc->base);
13189a90
LV
12701}
12702
722ef0a5
RH
12703static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12704{
12705 DisasContext *dc = container_of(dcbase, DisasContext, base);
12706 CPUARMState *env = cpu->env_ptr;
12707 unsigned int insn;
12708
12709 if (arm_pre_translate_insn(dc)) {
12710 return;
12711 }
12712
12713 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
58803318 12714 dc->insn = insn;
722ef0a5
RH
12715 dc->pc += 4;
12716 disas_arm_insn(dc, insn);
12717
d0264d86
RH
12718 arm_post_translate_insn(dc);
12719
12720 /* ARM is a fixed-length ISA. We performed the cross-page check
12721 in init_disas_context by adjusting max_insns. */
722ef0a5
RH
12722}
12723
dcf14dfb
PM
12724static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12725{
12726 /* Return true if this Thumb insn is always unconditional,
12727 * even inside an IT block. This is true of only a very few
12728 * instructions: BKPT, HLT, and SG.
12729 *
12730 * A larger class of instructions are UNPREDICTABLE if used
12731 * inside an IT block; we do not need to detect those here, because
12732 * what we do by default (perform the cc check and update the IT
12733 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12734 * choice for those situations.
12735 *
12736 * insn is either a 16-bit or a 32-bit instruction; the two are
12737 * distinguishable because for the 16-bit case the top 16 bits
12738 * are zeroes, and that isn't a valid 32-bit encoding.
12739 */
12740 if ((insn & 0xffffff00) == 0xbe00) {
12741 /* BKPT */
12742 return true;
12743 }
12744
12745 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12746 !arm_dc_feature(s, ARM_FEATURE_M)) {
12747 /* HLT: v8A only. This is unconditional even when it is going to
12748 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12749 * For v7 cores this was a plain old undefined encoding and so
12750 * honours its cc check. (We might be using the encoding as
12751 * a semihosting trap, but we don't change the cc check behaviour
12752 * on that account, because a debugger connected to a real v7A
12753 * core and emulating semihosting traps by catching the UNDEF
12754 * exception would also only see cases where the cc check passed.
12755 * No guest code should be trying to do a HLT semihosting trap
12756 * in an IT block anyway.
12757 */
12758 return true;
12759 }
12760
12761 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12762 arm_dc_feature(s, ARM_FEATURE_M)) {
12763 /* SG: v8M only */
12764 return true;
12765 }
12766
12767 return false;
12768}
12769
/*
 * TranslatorOps hook: translate one Thumb insn.  Handles the 16/32-bit
 * split fetch, IT-block condition skipping, and the variable-length
 * page-crossing check at the end.
 */
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    /* Bail out if the preamble already terminated the TB. */
    if (arm_pre_translate_insn(dc)) {
        return;
    }

    /* Fetch the first halfword; append the second if it's a 32-bit insn. */
    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    /* Inside an IT block, conditionalize the insn unless it is one of
     * the few always-unconditional encodings (BKPT/HLT/SG).
     */
    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
12838
/*
 * TranslatorOps hook: emit the end-of-TB code.  Dispatches on
 * dc->base.is_jmp to generate the appropriate exit (goto_tb, exception,
 * helper call, or exit_tb), with separate handling for the
 * single-stepping and exception-return cases, and finally emits the
 * "condition failed" codepath if a conditional branch/trap left one open.
 */
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch(dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            /* Insn size (2 or 4) is passed so the helper can adjust the
             * return address if the WFI is trapped to a higher EL.
             */
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}
12961
4013f7fc
LV
12962static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12963{
12964 DisasContext *dc = container_of(dcbase, DisasContext, base);
12965
12966 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
1d48474d 12967 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
4013f7fc
LV
12968}
12969
23169224
LV
12970static const TranslatorOps arm_translator_ops = {
12971 .init_disas_context = arm_tr_init_disas_context,
12972 .tb_start = arm_tr_tb_start,
12973 .insn_start = arm_tr_insn_start,
12974 .breakpoint_check = arm_tr_breakpoint_check,
12975 .translate_insn = arm_tr_translate_insn,
12976 .tb_stop = arm_tr_tb_stop,
12977 .disas_log = arm_tr_disas_log,
12978};
12979
722ef0a5
RH
12980static const TranslatorOps thumb_translator_ops = {
12981 .init_disas_context = arm_tr_init_disas_context,
12982 .tb_start = arm_tr_tb_start,
12983 .insn_start = arm_tr_insn_start,
12984 .breakpoint_check = arm_tr_breakpoint_check,
12985 .translate_insn = thumb_tr_translate_insn,
12986 .tb_stop = arm_tr_tb_stop,
12987 .disas_log = arm_tr_disas_log,
12988};
12989
70d3c035 12990/* generate intermediate code for basic block 'tb'. */
23169224 12991void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
70d3c035 12992{
23169224
LV
12993 DisasContext dc;
12994 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 12995
722ef0a5
RH
12996 if (ARM_TBFLAG_THUMB(tb->flags)) {
12997 ops = &thumb_translator_ops;
12998 }
23169224 12999#ifdef TARGET_AARCH64
70d3c035 13000 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
23169224 13001 ops = &aarch64_translator_ops;
2c0262af
FB
13002 }
13003#endif
23169224
LV
13004
13005 translator_loop(ops, &dc.base, cpu, tb);
2c0262af
FB
13006}
13007
/* Mode names indexed by the low 4 bits (M[3:0]) of the CPSR, as used
 * by arm_cpu_dump_state() via cpu_mode_names[psr & 0xf]; "???" marks
 * encodings with no valid mode.
 */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
9ee6e8bb 13012
878096ee
AF
13013void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
13014 int flags)
2c0262af 13015{
878096ee
AF
13016 ARMCPU *cpu = ARM_CPU(cs);
13017 CPUARMState *env = &cpu->env;
2c0262af
FB
13018 int i;
13019
17731115
PM
13020 if (is_a64(env)) {
13021 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
13022 return;
13023 }
13024
2c0262af 13025 for(i=0;i<16;i++) {
7fe48483 13026 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 13027 if ((i % 4) == 3)
7fe48483 13028 cpu_fprintf(f, "\n");
2c0262af 13029 else
7fe48483 13030 cpu_fprintf(f, " ");
2c0262af 13031 }
06e5cf7a 13032
5b906f35
PM
13033 if (arm_feature(env, ARM_FEATURE_M)) {
13034 uint32_t xpsr = xpsr_read(env);
13035 const char *mode;
1e577cc7
PM
13036 const char *ns_status = "";
13037
13038 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
13039 ns_status = env->v7m.secure ? "S " : "NS ";
13040 }
5b906f35
PM
13041
13042 if (xpsr & XPSR_EXCP) {
13043 mode = "handler";
13044 } else {
8bfc26ea 13045 if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
5b906f35
PM
13046 mode = "unpriv-thread";
13047 } else {
13048 mode = "priv-thread";
13049 }
13050 }
13051
1e577cc7 13052 cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
5b906f35
PM
13053 xpsr,
13054 xpsr & XPSR_N ? 'N' : '-',
13055 xpsr & XPSR_Z ? 'Z' : '-',
13056 xpsr & XPSR_C ? 'C' : '-',
13057 xpsr & XPSR_V ? 'V' : '-',
13058 xpsr & XPSR_T ? 'T' : 'A',
1e577cc7 13059 ns_status,
5b906f35 13060 mode);
06e5cf7a 13061 } else {
5b906f35
PM
13062 uint32_t psr = cpsr_read(env);
13063 const char *ns_status = "";
13064
13065 if (arm_feature(env, ARM_FEATURE_EL3) &&
13066 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
13067 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
13068 }
13069
13070 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
13071 psr,
13072 psr & CPSR_N ? 'N' : '-',
13073 psr & CPSR_Z ? 'Z' : '-',
13074 psr & CPSR_C ? 'C' : '-',
13075 psr & CPSR_V ? 'V' : '-',
13076 psr & CPSR_T ? 'T' : 'A',
13077 ns_status,
13078 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
13079 }
b7bcbe95 13080
f2617cfc
PM
13081 if (flags & CPU_DUMP_FPU) {
13082 int numvfpregs = 0;
13083 if (arm_feature(env, ARM_FEATURE_VFP)) {
13084 numvfpregs += 16;
13085 }
13086 if (arm_feature(env, ARM_FEATURE_VFP3)) {
13087 numvfpregs += 16;
13088 }
13089 for (i = 0; i < numvfpregs; i++) {
9a2b5256 13090 uint64_t v = *aa32_vfp_dreg(env, i);
f2617cfc
PM
13091 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
13092 i * 2, (uint32_t)v,
13093 i * 2 + 1, (uint32_t)(v >> 32),
13094 i, v);
13095 }
13096 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 13097 }
2c0262af 13098}
a6b025d3 13099
bad729e2
RH
13100void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
13101 target_ulong *data)
d2856f1a 13102{
3926cc84 13103 if (is_a64(env)) {
bad729e2 13104 env->pc = data[0];
40f860cd 13105 env->condexec_bits = 0;
aaa1f954 13106 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 13107 } else {
bad729e2
RH
13108 env->regs[15] = data[0];
13109 env->condexec_bits = data[1];
aaa1f954 13110 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 13111 }
d2856f1a 13112}