]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
target/arm: Convert division from feature bits to isar0 tests
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
36a71934 28#include "tcg-op-gvec.h"
1de7afc9 29#include "qemu/log.h"
534df156 30#include "qemu/bitops.h"
1d854765 31#include "arm_ldst.h"
19a6e31c 32#include "exec/semihost.h"
1497c961 33
2ef6175a
RH
34#include "exec/helper-proto.h"
35#include "exec/helper-gen.h"
2c0262af 36
a7e30d84 37#include "trace-tcg.h"
508127e2 38#include "exec/log.h"
a7e30d84
LV
39
40
2b51668f
PM
41#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 43/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 44#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
c99a55d3 45#define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
2b51668f
PM
46#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 51
86753403 52#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 53
f570c61e 54#include "translate.h"
e12ce78d 55
b5ff1b31
FB
56#if defined(CONFIG_USER_ONLY)
57#define IS_USER(s) 1
58#else
59#define IS_USER(s) (s->user)
60#endif
61
ad69471c 62/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 63static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 64static TCGv_i32 cpu_R[16];
78bcaa3e
RH
65TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66TCGv_i64 cpu_exclusive_addr;
67TCGv_i64 cpu_exclusive_val;
ad69471c 68
b26eefb6 69/* FIXME: These should be removed. */
39d5492a 70static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 71static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 72
022c62cb 73#include "exec/gen-icount.h"
2e70f6ef 74
155c3eac
FN
75static const char *regnames[] =
76 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
78
61adacc8
RH
/* Signature shared by gen_ helpers that invoke three-operand Neon
 * helpers taking the CPU env pointer.
 */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
82
b26eefb6
PB
83/* initialize TCG globals. */
84void arm_translate_init(void)
85{
155c3eac
FN
86 int i;
87
155c3eac 88 for (i = 0; i < 16; i++) {
e1ccc054 89 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
0ecb72a5 90 offsetof(CPUARMState, regs[i]),
155c3eac
FN
91 regnames[i]);
92 }
e1ccc054
RH
93 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
94 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
95 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
96 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
66c374de 97
e1ccc054 98 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 99 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
e1ccc054 100 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 101 offsetof(CPUARMState, exclusive_val), "exclusive_val");
155c3eac 102
14ade10f 103 a64_translate_init();
b26eefb6
PB
104}
105
9bb6558a
PM
/* Flags for the disas_set_da_iss info argument:
 * the low 5 bits hold the Rt register number; the remaining bits
 * are flags modifying how/whether syndrome info is recorded.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;
117
118/* Save the syndrome information for a Data Abort */
119static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
120{
121 uint32_t syn;
122 int sas = memop & MO_SIZE;
123 bool sse = memop & MO_SIGN;
124 bool is_acqrel = issinfo & ISSIsAcqRel;
125 bool is_write = issinfo & ISSIsWrite;
126 bool is_16bit = issinfo & ISSIs16Bit;
127 int srt = issinfo & ISSRegMask;
128
129 if (issinfo & ISSInvalid) {
130 /* Some callsites want to conditionally provide ISS info,
131 * eg "only if this was not a writeback"
132 */
133 return;
134 }
135
136 if (srt == 15) {
137 /* For AArch32, insns where the src/dest is R15 never generate
138 * ISS information. Catching that here saves checking at all
139 * the call sites.
140 */
141 return;
142 }
143
144 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
145 0, 0, 0, is_write, 0, is_16bit);
146 disas_set_insn_syndrome(s, syn);
147}
148
8bd5c820 149static inline int get_a32_user_mem_index(DisasContext *s)
579d21cc 150{
8bd5c820 151 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
579d21cc
PM
152 * insns:
153 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
154 * otherwise, access as if at PL0.
155 */
156 switch (s->mmu_idx) {
157 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
158 case ARMMMUIdx_S12NSE0:
159 case ARMMMUIdx_S12NSE1:
8bd5c820 160 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
579d21cc
PM
161 case ARMMMUIdx_S1E3:
162 case ARMMMUIdx_S1SE0:
163 case ARMMMUIdx_S1SE1:
8bd5c820 164 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
e7b921c2
PM
165 case ARMMMUIdx_MUser:
166 case ARMMMUIdx_MPriv:
167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
62593718
PM
168 case ARMMMUIdx_MUserNegPri:
169 case ARMMMUIdx_MPrivNegPri:
170 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
b9f587d6
PM
171 case ARMMMUIdx_MSUser:
172 case ARMMMUIdx_MSPriv:
b9f587d6 173 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
62593718
PM
174 case ARMMMUIdx_MSUserNegPri:
175 case ARMMMUIdx_MSPrivNegPri:
176 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
579d21cc
PM
177 case ARMMMUIdx_S2NS:
178 default:
179 g_assert_not_reached();
180 }
181}
182
39d5492a 183static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 184{
39d5492a 185 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
186 tcg_gen_ld_i32(tmp, cpu_env, offset);
187 return tmp;
188}
189
0ecb72a5 190#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 191
39d5492a 192static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
193{
194 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 195 tcg_temp_free_i32(var);
d9ba4830
PB
196}
197
198#define store_cpu_field(var, name) \
0ecb72a5 199 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 200
b26eefb6 201/* Set a variable to the value of a CPU register. */
39d5492a 202static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
203{
204 if (reg == 15) {
205 uint32_t addr;
b90372ad 206 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
207 if (s->thumb)
208 addr = (long)s->pc + 2;
209 else
210 addr = (long)s->pc + 4;
211 tcg_gen_movi_i32(var, addr);
212 } else {
155c3eac 213 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
214 }
215}
216
217/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 218static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 219{
39d5492a 220 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
221 load_reg_var(s, tmp, reg);
222 return tmp;
223}
224
225/* Set a CPU register. The source must be a temporary and will be
226 marked as dead. */
39d5492a 227static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
228{
229 if (reg == 15) {
9b6a3ea7
PM
230 /* In Thumb mode, we must ignore bit 0.
231 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
232 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
233 * We choose to ignore [1:0] in ARM mode for all architecture versions.
234 */
235 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
dcba3a8d 236 s->base.is_jmp = DISAS_JUMP;
b26eefb6 237 }
155c3eac 238 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 239 tcg_temp_free_i32(var);
b26eefb6
PB
240}
241
55203189
PM
242/*
243 * Variant of store_reg which applies v8M stack-limit checks before updating
244 * SP. If the check fails this will result in an exception being taken.
245 * We disable the stack checks for CONFIG_USER_ONLY because we have
246 * no idea what the stack limits should be in that case.
247 * If stack checking is not being done this just acts like store_reg().
248 */
249static void store_sp_checked(DisasContext *s, TCGv_i32 var)
250{
251#ifndef CONFIG_USER_ONLY
252 if (s->v8m_stackcheck) {
253 gen_helper_v8m_stackcheck(cpu_env, var);
254 }
255#endif
256 store_reg(s, 13, var);
257}
258
b26eefb6 259/* Value extensions. */
86831435
PB
260#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
261#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
262#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
263#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
264
1497c961
PB
265#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
266#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 267
b26eefb6 268
39d5492a 269static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 270{
39d5492a 271 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 272 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
273 tcg_temp_free_i32(tmp_mask);
274}
d9ba4830
PB
275/* Set NZCV flags from the high 4 bits of var. */
276#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
277
d4a2dc67 278static void gen_exception_internal(int excp)
d9ba4830 279{
d4a2dc67
PM
280 TCGv_i32 tcg_excp = tcg_const_i32(excp);
281
282 assert(excp_is_internal(excp));
283 gen_helper_exception_internal(cpu_env, tcg_excp);
284 tcg_temp_free_i32(tcg_excp);
285}
286
73710361 287static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
d4a2dc67
PM
288{
289 TCGv_i32 tcg_excp = tcg_const_i32(excp);
290 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
73710361 291 TCGv_i32 tcg_el = tcg_const_i32(target_el);
d4a2dc67 292
73710361
GB
293 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
294 tcg_syn, tcg_el);
295
296 tcg_temp_free_i32(tcg_el);
d4a2dc67
PM
297 tcg_temp_free_i32(tcg_syn);
298 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
299}
300
50225ad0
PM
301static void gen_ss_advance(DisasContext *s)
302{
303 /* If the singlestep state is Active-not-pending, advance to
304 * Active-pending.
305 */
306 if (s->ss_active) {
307 s->pstate_ss = 0;
308 gen_helper_clear_pstate_ss(cpu_env);
309 }
310}
311
312static void gen_step_complete_exception(DisasContext *s)
313{
314 /* We just completed step of an insn. Move from Active-not-pending
315 * to Active-pending, and then also take the swstep exception.
316 * This corresponds to making the (IMPDEF) choice to prioritize
317 * swstep exceptions over asynchronous exceptions taken to an exception
318 * level where debug is disabled. This choice has the advantage that
319 * we do not need to maintain internal state corresponding to the
320 * ISV/EX syndrome bits between completion of the step and generation
321 * of the exception, and our syndrome information is always correct.
322 */
323 gen_ss_advance(s);
73710361
GB
324 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
325 default_exception_el(s));
dcba3a8d 326 s->base.is_jmp = DISAS_NORETURN;
50225ad0
PM
327}
328
5425415e
PM
329static void gen_singlestep_exception(DisasContext *s)
330{
331 /* Generate the right kind of exception for singlestep, which is
332 * either the architectural singlestep or EXCP_DEBUG for QEMU's
333 * gdb singlestepping.
334 */
335 if (s->ss_active) {
336 gen_step_complete_exception(s);
337 } else {
338 gen_exception_internal(EXCP_DEBUG);
339 }
340}
341
b636649f
PM
342static inline bool is_singlestepping(DisasContext *s)
343{
344 /* Return true if we are singlestepping either because of
345 * architectural singlestep or QEMU gdbstub singlestep. This does
346 * not include the command line '-singlestep' mode which is rather
347 * misnamed as it only means "one instruction per TB" and doesn't
348 * affect the code we generate.
349 */
dcba3a8d 350 return s->base.singlestep_enabled || s->ss_active;
b636649f
PM
351}
352
39d5492a 353static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 354{
39d5492a
PM
355 TCGv_i32 tmp1 = tcg_temp_new_i32();
356 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
357 tcg_gen_ext16s_i32(tmp1, a);
358 tcg_gen_ext16s_i32(tmp2, b);
3670669c 359 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 360 tcg_temp_free_i32(tmp2);
3670669c
PB
361 tcg_gen_sari_i32(a, a, 16);
362 tcg_gen_sari_i32(b, b, 16);
363 tcg_gen_mul_i32(b, b, a);
364 tcg_gen_mov_i32(a, tmp1);
7d1b0095 365 tcg_temp_free_i32(tmp1);
3670669c
PB
366}
367
368/* Byteswap each halfword. */
39d5492a 369static void gen_rev16(TCGv_i32 var)
3670669c 370{
39d5492a 371 TCGv_i32 tmp = tcg_temp_new_i32();
68cedf73 372 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
3670669c 373 tcg_gen_shri_i32(tmp, var, 8);
68cedf73
AJ
374 tcg_gen_and_i32(tmp, tmp, mask);
375 tcg_gen_and_i32(var, var, mask);
3670669c 376 tcg_gen_shli_i32(var, var, 8);
3670669c 377 tcg_gen_or_i32(var, var, tmp);
68cedf73 378 tcg_temp_free_i32(mask);
7d1b0095 379 tcg_temp_free_i32(tmp);
3670669c
PB
380}
381
382/* Byteswap low halfword and sign extend. */
39d5492a 383static void gen_revsh(TCGv_i32 var)
3670669c 384{
1a855029
AJ
385 tcg_gen_ext16u_i32(var, var);
386 tcg_gen_bswap16_i32(var, var);
387 tcg_gen_ext16s_i32(var, var);
3670669c
PB
388}
389
838fa72d 390/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 391static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 392{
838fa72d
AJ
393 TCGv_i64 tmp64 = tcg_temp_new_i64();
394
395 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 396 tcg_temp_free_i32(b);
838fa72d
AJ
397 tcg_gen_shli_i64(tmp64, tmp64, 32);
398 tcg_gen_add_i64(a, tmp64, a);
399
400 tcg_temp_free_i64(tmp64);
401 return a;
402}
403
404/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 405static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
406{
407 TCGv_i64 tmp64 = tcg_temp_new_i64();
408
409 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 410 tcg_temp_free_i32(b);
838fa72d
AJ
411 tcg_gen_shli_i64(tmp64, tmp64, 32);
412 tcg_gen_sub_i64(a, tmp64, a);
413
414 tcg_temp_free_i64(tmp64);
415 return a;
3670669c
PB
416}
417
5e3f878a 418/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 419static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 420{
39d5492a
PM
421 TCGv_i32 lo = tcg_temp_new_i32();
422 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 423 TCGv_i64 ret;
5e3f878a 424
831d7fe8 425 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 426 tcg_temp_free_i32(a);
7d1b0095 427 tcg_temp_free_i32(b);
831d7fe8
RH
428
429 ret = tcg_temp_new_i64();
430 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
431 tcg_temp_free_i32(lo);
432 tcg_temp_free_i32(hi);
831d7fe8
RH
433
434 return ret;
5e3f878a
PB
435}
436
39d5492a 437static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 438{
39d5492a
PM
439 TCGv_i32 lo = tcg_temp_new_i32();
440 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 441 TCGv_i64 ret;
5e3f878a 442
831d7fe8 443 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 444 tcg_temp_free_i32(a);
7d1b0095 445 tcg_temp_free_i32(b);
831d7fe8
RH
446
447 ret = tcg_temp_new_i64();
448 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
449 tcg_temp_free_i32(lo);
450 tcg_temp_free_i32(hi);
831d7fe8
RH
451
452 return ret;
5e3f878a
PB
453}
454
8f01245e 455/* Swap low and high halfwords. */
39d5492a 456static void gen_swap_half(TCGv_i32 var)
8f01245e 457{
39d5492a 458 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
459 tcg_gen_shri_i32(tmp, var, 16);
460 tcg_gen_shli_i32(var, var, 16);
461 tcg_gen_or_i32(var, var, tmp);
7d1b0095 462 tcg_temp_free_i32(tmp);
8f01245e
PB
463}
464
b26eefb6
PB
465/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
466 tmp = (t0 ^ t1) & 0x8000;
467 t0 &= ~0x8000;
468 t1 &= ~0x8000;
469 t0 = (t0 + t1) ^ tmp;
470 */
471
39d5492a 472static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 473{
39d5492a 474 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
475 tcg_gen_xor_i32(tmp, t0, t1);
476 tcg_gen_andi_i32(tmp, tmp, 0x8000);
477 tcg_gen_andi_i32(t0, t0, ~0x8000);
478 tcg_gen_andi_i32(t1, t1, ~0x8000);
479 tcg_gen_add_i32(t0, t0, t1);
480 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
481 tcg_temp_free_i32(tmp);
482 tcg_temp_free_i32(t1);
b26eefb6
PB
483}
484
485/* Set CF to the top bit of var. */
39d5492a 486static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 487{
66c374de 488 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
489}
490
491/* Set N and Z flags from var. */
39d5492a 492static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 493{
66c374de
AJ
494 tcg_gen_mov_i32(cpu_NF, var);
495 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
496}
497
498/* T0 += T1 + CF. */
39d5492a 499static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 500{
396e467c 501 tcg_gen_add_i32(t0, t0, t1);
66c374de 502 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
503}
504
e9bb4aa9 505/* dest = T0 + T1 + CF. */
39d5492a 506static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 507{
e9bb4aa9 508 tcg_gen_add_i32(dest, t0, t1);
66c374de 509 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
510}
511
3670669c 512/* dest = T0 - T1 + CF - 1. */
39d5492a 513static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 514{
3670669c 515 tcg_gen_sub_i32(dest, t0, t1);
66c374de 516 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 517 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
518}
519
72485ec4 520/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 521static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 522{
39d5492a 523 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
524 tcg_gen_movi_i32(tmp, 0);
525 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 526 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 527 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
528 tcg_gen_xor_i32(tmp, t0, t1);
529 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
530 tcg_temp_free_i32(tmp);
531 tcg_gen_mov_i32(dest, cpu_NF);
532}
533
49b4c31e 534/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 535static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 536{
39d5492a 537 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
538 if (TCG_TARGET_HAS_add2_i32) {
539 tcg_gen_movi_i32(tmp, 0);
540 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 541 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
542 } else {
543 TCGv_i64 q0 = tcg_temp_new_i64();
544 TCGv_i64 q1 = tcg_temp_new_i64();
545 tcg_gen_extu_i32_i64(q0, t0);
546 tcg_gen_extu_i32_i64(q1, t1);
547 tcg_gen_add_i64(q0, q0, q1);
548 tcg_gen_extu_i32_i64(q1, cpu_CF);
549 tcg_gen_add_i64(q0, q0, q1);
550 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
551 tcg_temp_free_i64(q0);
552 tcg_temp_free_i64(q1);
553 }
554 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
555 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
556 tcg_gen_xor_i32(tmp, t0, t1);
557 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
558 tcg_temp_free_i32(tmp);
559 tcg_gen_mov_i32(dest, cpu_NF);
560}
561
72485ec4 562/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 563static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 564{
39d5492a 565 TCGv_i32 tmp;
72485ec4
AJ
566 tcg_gen_sub_i32(cpu_NF, t0, t1);
567 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
568 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
569 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
570 tmp = tcg_temp_new_i32();
571 tcg_gen_xor_i32(tmp, t0, t1);
572 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
573 tcg_temp_free_i32(tmp);
574 tcg_gen_mov_i32(dest, cpu_NF);
575}
576
e77f0832 577/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 578static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 579{
39d5492a 580 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
581 tcg_gen_not_i32(tmp, t1);
582 gen_adc_CC(dest, t0, tmp);
39d5492a 583 tcg_temp_free_i32(tmp);
2de68a49
RH
584}
585
365af80e 586#define GEN_SHIFT(name) \
39d5492a 587static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 588{ \
39d5492a 589 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
590 tmp1 = tcg_temp_new_i32(); \
591 tcg_gen_andi_i32(tmp1, t1, 0xff); \
592 tmp2 = tcg_const_i32(0); \
593 tmp3 = tcg_const_i32(0x1f); \
594 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
595 tcg_temp_free_i32(tmp3); \
596 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
597 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
598 tcg_temp_free_i32(tmp2); \
599 tcg_temp_free_i32(tmp1); \
600}
601GEN_SHIFT(shl)
602GEN_SHIFT(shr)
603#undef GEN_SHIFT
604
39d5492a 605static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 606{
39d5492a 607 TCGv_i32 tmp1, tmp2;
365af80e
AJ
608 tmp1 = tcg_temp_new_i32();
609 tcg_gen_andi_i32(tmp1, t1, 0xff);
610 tmp2 = tcg_const_i32(0x1f);
611 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
612 tcg_temp_free_i32(tmp2);
613 tcg_gen_sar_i32(dest, t0, tmp1);
614 tcg_temp_free_i32(tmp1);
615}
616
39d5492a 617static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 618{
39d5492a
PM
619 TCGv_i32 c0 = tcg_const_i32(0);
620 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
621 tcg_gen_neg_i32(tmp, src);
622 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
623 tcg_temp_free_i32(c0);
624 tcg_temp_free_i32(tmp);
625}
ad69471c 626
39d5492a 627static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 628{
9a119ff6 629 if (shift == 0) {
66c374de 630 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 631 } else {
66c374de
AJ
632 tcg_gen_shri_i32(cpu_CF, var, shift);
633 if (shift != 31) {
634 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
635 }
9a119ff6 636 }
9a119ff6 637}
b26eefb6 638
9a119ff6 639/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
640static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
641 int shift, int flags)
9a119ff6
PB
642{
643 switch (shiftop) {
644 case 0: /* LSL */
645 if (shift != 0) {
646 if (flags)
647 shifter_out_im(var, 32 - shift);
648 tcg_gen_shli_i32(var, var, shift);
649 }
650 break;
651 case 1: /* LSR */
652 if (shift == 0) {
653 if (flags) {
66c374de 654 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
655 }
656 tcg_gen_movi_i32(var, 0);
657 } else {
658 if (flags)
659 shifter_out_im(var, shift - 1);
660 tcg_gen_shri_i32(var, var, shift);
661 }
662 break;
663 case 2: /* ASR */
664 if (shift == 0)
665 shift = 32;
666 if (flags)
667 shifter_out_im(var, shift - 1);
668 if (shift == 32)
669 shift = 31;
670 tcg_gen_sari_i32(var, var, shift);
671 break;
672 case 3: /* ROR/RRX */
673 if (shift != 0) {
674 if (flags)
675 shifter_out_im(var, shift - 1);
f669df27 676 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 677 } else {
39d5492a 678 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 679 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
680 if (flags)
681 shifter_out_im(var, 0);
682 tcg_gen_shri_i32(var, var, 1);
b26eefb6 683 tcg_gen_or_i32(var, var, tmp);
7d1b0095 684 tcg_temp_free_i32(tmp);
b26eefb6
PB
685 }
686 }
687};
688
39d5492a
PM
689static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
690 TCGv_i32 shift, int flags)
8984bd2e
PB
691{
692 if (flags) {
693 switch (shiftop) {
9ef39277
BS
694 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
695 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
696 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
697 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
698 }
699 } else {
700 switch (shiftop) {
365af80e
AJ
701 case 0:
702 gen_shl(var, var, shift);
703 break;
704 case 1:
705 gen_shr(var, var, shift);
706 break;
707 case 2:
708 gen_sar(var, var, shift);
709 break;
f669df27
AJ
710 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
711 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
712 }
713 }
7d1b0095 714 tcg_temp_free_i32(shift);
8984bd2e
PB
715}
716
6ddbc6e4
PB
717#define PAS_OP(pfx) \
718 switch (op2) { \
719 case 0: gen_pas_helper(glue(pfx,add16)); break; \
720 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
721 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
722 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
723 case 4: gen_pas_helper(glue(pfx,add8)); break; \
724 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
725 }
39d5492a 726static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 727{
a7812ae4 728 TCGv_ptr tmp;
6ddbc6e4
PB
729
730 switch (op1) {
731#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
732 case 1:
a7812ae4 733 tmp = tcg_temp_new_ptr();
0ecb72a5 734 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 735 PAS_OP(s)
b75263d6 736 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
737 break;
738 case 5:
a7812ae4 739 tmp = tcg_temp_new_ptr();
0ecb72a5 740 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 741 PAS_OP(u)
b75263d6 742 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
743 break;
744#undef gen_pas_helper
745#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
746 case 2:
747 PAS_OP(q);
748 break;
749 case 3:
750 PAS_OP(sh);
751 break;
752 case 6:
753 PAS_OP(uq);
754 break;
755 case 7:
756 PAS_OP(uh);
757 break;
758#undef gen_pas_helper
759 }
760}
9ee6e8bb
PB
761#undef PAS_OP
762
6ddbc6e4
PB
763/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
764#define PAS_OP(pfx) \
ed89a2f1 765 switch (op1) { \
6ddbc6e4
PB
766 case 0: gen_pas_helper(glue(pfx,add8)); break; \
767 case 1: gen_pas_helper(glue(pfx,add16)); break; \
768 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
769 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
770 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
771 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
772 }
39d5492a 773static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 774{
a7812ae4 775 TCGv_ptr tmp;
6ddbc6e4 776
ed89a2f1 777 switch (op2) {
6ddbc6e4
PB
778#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
779 case 0:
a7812ae4 780 tmp = tcg_temp_new_ptr();
0ecb72a5 781 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 782 PAS_OP(s)
b75263d6 783 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
784 break;
785 case 4:
a7812ae4 786 tmp = tcg_temp_new_ptr();
0ecb72a5 787 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 788 PAS_OP(u)
b75263d6 789 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
790 break;
791#undef gen_pas_helper
792#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
793 case 1:
794 PAS_OP(q);
795 break;
796 case 2:
797 PAS_OP(sh);
798 break;
799 case 5:
800 PAS_OP(uq);
801 break;
802 case 6:
803 PAS_OP(uh);
804 break;
805#undef gen_pas_helper
806 }
807}
9ee6e8bb
PB
808#undef PAS_OP
809
39fb730a 810/*
6c2c63d3 811 * Generate a conditional based on ARM condition code cc.
39fb730a
AG
812 * This is common between ARM and Aarch64 targets.
813 */
6c2c63d3 814void arm_test_cc(DisasCompare *cmp, int cc)
d9ba4830 815{
6c2c63d3
RH
816 TCGv_i32 value;
817 TCGCond cond;
818 bool global = true;
d9ba4830 819
d9ba4830
PB
820 switch (cc) {
821 case 0: /* eq: Z */
d9ba4830 822 case 1: /* ne: !Z */
6c2c63d3
RH
823 cond = TCG_COND_EQ;
824 value = cpu_ZF;
d9ba4830 825 break;
6c2c63d3 826
d9ba4830 827 case 2: /* cs: C */
d9ba4830 828 case 3: /* cc: !C */
6c2c63d3
RH
829 cond = TCG_COND_NE;
830 value = cpu_CF;
d9ba4830 831 break;
6c2c63d3 832
d9ba4830 833 case 4: /* mi: N */
d9ba4830 834 case 5: /* pl: !N */
6c2c63d3
RH
835 cond = TCG_COND_LT;
836 value = cpu_NF;
d9ba4830 837 break;
6c2c63d3 838
d9ba4830 839 case 6: /* vs: V */
d9ba4830 840 case 7: /* vc: !V */
6c2c63d3
RH
841 cond = TCG_COND_LT;
842 value = cpu_VF;
d9ba4830 843 break;
6c2c63d3 844
d9ba4830 845 case 8: /* hi: C && !Z */
6c2c63d3
RH
846 case 9: /* ls: !C || Z -> !(C && !Z) */
847 cond = TCG_COND_NE;
848 value = tcg_temp_new_i32();
849 global = false;
850 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
851 ZF is non-zero for !Z; so AND the two subexpressions. */
852 tcg_gen_neg_i32(value, cpu_CF);
853 tcg_gen_and_i32(value, value, cpu_ZF);
d9ba4830 854 break;
6c2c63d3 855
d9ba4830 856 case 10: /* ge: N == V -> N ^ V == 0 */
d9ba4830 857 case 11: /* lt: N != V -> N ^ V != 0 */
6c2c63d3
RH
858 /* Since we're only interested in the sign bit, == 0 is >= 0. */
859 cond = TCG_COND_GE;
860 value = tcg_temp_new_i32();
861 global = false;
862 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
d9ba4830 863 break;
6c2c63d3 864
d9ba4830 865 case 12: /* gt: !Z && N == V */
d9ba4830 866 case 13: /* le: Z || N != V */
6c2c63d3
RH
867 cond = TCG_COND_NE;
868 value = tcg_temp_new_i32();
869 global = false;
870 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
871 * the sign bit then AND with ZF to yield the result. */
872 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
873 tcg_gen_sari_i32(value, value, 31);
874 tcg_gen_andc_i32(value, cpu_ZF, value);
d9ba4830 875 break;
6c2c63d3 876
9305eac0
RH
877 case 14: /* always */
878 case 15: /* always */
879 /* Use the ALWAYS condition, which will fold early.
880 * It doesn't matter what we use for the value. */
881 cond = TCG_COND_ALWAYS;
882 value = cpu_ZF;
883 goto no_invert;
884
d9ba4830
PB
885 default:
886 fprintf(stderr, "Bad condition code 0x%x\n", cc);
887 abort();
888 }
6c2c63d3
RH
889
890 if (cc & 1) {
891 cond = tcg_invert_cond(cond);
892 }
893
9305eac0 894 no_invert:
6c2c63d3
RH
895 cmp->cond = cond;
896 cmp->value = value;
897 cmp->value_global = global;
898}
899
900void arm_free_cc(DisasCompare *cmp)
901{
902 if (!cmp->value_global) {
903 tcg_temp_free_i32(cmp->value);
904 }
905}
906
907void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
908{
909 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
910}
911
912void arm_gen_test_cc(int cc, TCGLabel *label)
913{
914 DisasCompare cmp;
915 arm_test_cc(&cmp, cc);
916 arm_jump_cc(&cmp, label);
917 arm_free_cc(&cmp);
d9ba4830 918}
2c0262af 919
b1d8e52e 920static const uint8_t table_logic_cc[16] = {
2c0262af
FB
921 1, /* and */
922 1, /* xor */
923 0, /* sub */
924 0, /* rsb */
925 0, /* add */
926 0, /* adc */
927 0, /* sbc */
928 0, /* rsc */
929 1, /* andl */
930 1, /* xorl */
931 0, /* cmp */
932 0, /* cmn */
933 1, /* orr */
934 1, /* mov */
935 1, /* bic */
936 1, /* mvn */
937};
3b46e624 938
4d5e8c96
PM
939static inline void gen_set_condexec(DisasContext *s)
940{
941 if (s->condexec_mask) {
942 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
943 TCGv_i32 tmp = tcg_temp_new_i32();
944 tcg_gen_movi_i32(tmp, val);
945 store_cpu_field(tmp, condexec_bits);
946 }
947}
948
949static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
950{
951 tcg_gen_movi_i32(cpu_R[15], val);
952}
953
d9ba4830
PB
954/* Set PC and Thumb state from an immediate address. */
955static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 956{
39d5492a 957 TCGv_i32 tmp;
99c475ab 958
dcba3a8d 959 s->base.is_jmp = DISAS_JUMP;
d9ba4830 960 if (s->thumb != (addr & 1)) {
7d1b0095 961 tmp = tcg_temp_new_i32();
d9ba4830 962 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 963 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 964 tcg_temp_free_i32(tmp);
d9ba4830 965 }
155c3eac 966 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
967}
968
969/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 970static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 971{
dcba3a8d 972 s->base.is_jmp = DISAS_JUMP;
155c3eac
FN
973 tcg_gen_andi_i32(cpu_R[15], var, ~1);
974 tcg_gen_andi_i32(var, var, 1);
975 store_cpu_field(var, thumb);
d9ba4830
PB
976}
977
3bb8a96f
PM
978/* Set PC and Thumb state from var. var is marked as dead.
979 * For M-profile CPUs, include logic to detect exception-return
980 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
981 * and BX reg, and no others, and happens only for code in Handler mode.
982 */
983static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
984{
985 /* Generate the same code here as for a simple bx, but flag via
dcba3a8d 986 * s->base.is_jmp that we need to do the rest of the work later.
3bb8a96f
PM
987 */
988 gen_bx(s, var);
d02a8698
PM
989 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
990 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
dcba3a8d 991 s->base.is_jmp = DISAS_BX_EXCRET;
3bb8a96f
PM
992 }
993}
994
995static inline void gen_bx_excret_final_code(DisasContext *s)
996{
997 /* Generate the code to finish possible exception return and end the TB */
998 TCGLabel *excret_label = gen_new_label();
d02a8698
PM
999 uint32_t min_magic;
1000
1001 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
1002 /* Covers FNC_RETURN and EXC_RETURN magic */
1003 min_magic = FNC_RETURN_MIN_MAGIC;
1004 } else {
1005 /* EXC_RETURN magic only */
1006 min_magic = EXC_RETURN_MIN_MAGIC;
1007 }
3bb8a96f
PM
1008
1009 /* Is the new PC value in the magic range indicating exception return? */
d02a8698 1010 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
3bb8a96f
PM
1011 /* No: end the TB as we would for a DISAS_JMP */
1012 if (is_singlestepping(s)) {
1013 gen_singlestep_exception(s);
1014 } else {
07ea28b4 1015 tcg_gen_exit_tb(NULL, 0);
3bb8a96f
PM
1016 }
1017 gen_set_label(excret_label);
1018 /* Yes: this is an exception return.
1019 * At this point in runtime env->regs[15] and env->thumb will hold
1020 * the exception-return magic number, which do_v7m_exception_exit()
1021 * will read. Nothing else will be able to see those values because
1022 * the cpu-exec main loop guarantees that we will always go straight
1023 * from raising the exception to the exception-handling code.
1024 *
1025 * gen_ss_advance(s) does nothing on M profile currently but
1026 * calling it is conceptually the right thing as we have executed
1027 * this instruction (compare SWI, HVC, SMC handling).
1028 */
1029 gen_ss_advance(s);
1030 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1031}
1032
fb602cb7
PM
1033static inline void gen_bxns(DisasContext *s, int rm)
1034{
1035 TCGv_i32 var = load_reg(s, rm);
1036
1037 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1038 * we need to sync state before calling it, but:
1039 * - we don't need to do gen_set_pc_im() because the bxns helper will
1040 * always set the PC itself
1041 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1042 * unless it's outside an IT block or the last insn in an IT block,
1043 * so we know that condexec == 0 (already set at the top of the TB)
1044 * is correct in the non-UNPREDICTABLE cases, and we can choose
1045 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1046 */
1047 gen_helper_v7m_bxns(cpu_env, var);
1048 tcg_temp_free_i32(var);
ef475b5d 1049 s->base.is_jmp = DISAS_EXIT;
fb602cb7
PM
1050}
1051
3e3fa230
PM
1052static inline void gen_blxns(DisasContext *s, int rm)
1053{
1054 TCGv_i32 var = load_reg(s, rm);
1055
1056 /* We don't need to sync condexec state, for the same reason as bxns.
1057 * We do however need to set the PC, because the blxns helper reads it.
1058 * The blxns helper may throw an exception.
1059 */
1060 gen_set_pc_im(s, s->pc);
1061 gen_helper_v7m_blxns(cpu_env, var);
1062 tcg_temp_free_i32(var);
1063 s->base.is_jmp = DISAS_EXIT;
1064}
1065
21aeb343
JR
1066/* Variant of store_reg which uses branch&exchange logic when storing
1067 to r15 in ARM architecture v7 and above. The source must be a temporary
1068 and will be marked as dead. */
7dcc1f89 1069static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
1070{
1071 if (reg == 15 && ENABLE_ARCH_7) {
1072 gen_bx(s, var);
1073 } else {
1074 store_reg(s, reg, var);
1075 }
1076}
1077
be5e7a76
DES
1078/* Variant of store_reg which uses branch&exchange logic when storing
1079 * to r15 in ARM architecture v5T and above. This is used for storing
1080 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1081 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1082static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1083{
1084 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1085 gen_bx_excret(s, var);
be5e7a76
DES
1086 } else {
1087 store_reg(s, reg, var);
1088 }
1089}
1090
e334bd31
PB
1091#ifdef CONFIG_USER_ONLY
1092#define IS_USER_ONLY 1
1093#else
1094#define IS_USER_ONLY 0
1095#endif
1096
08307563
PM
1097/* Abstractions of "generate code to do a guest load/store for
1098 * AArch32", where a vaddr is always 32 bits (and is zero
1099 * extended if we're a 64 bit core) and data is also
1100 * 32 bits unless specifically doing a 64 bit access.
1101 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 1102 * that the address argument is TCGv_i32 rather than TCGv.
08307563 1103 */
08307563 1104
7f5616f5 1105static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
08307563 1106{
7f5616f5
RH
1107 TCGv addr = tcg_temp_new();
1108 tcg_gen_extu_i32_tl(addr, a32);
1109
e334bd31 1110 /* Not needed for user-mode BE32, where we use MO_BE instead. */
7f5616f5
RH
1111 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1112 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
e334bd31 1113 }
7f5616f5 1114 return addr;
08307563
PM
1115}
1116
7f5616f5
RH
1117static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1118 int index, TCGMemOp opc)
08307563 1119{
2aeba0d0
JS
1120 TCGv addr;
1121
1122 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1123 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1124 opc |= MO_ALIGN;
1125 }
1126
1127 addr = gen_aa32_addr(s, a32, opc);
7f5616f5
RH
1128 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1129 tcg_temp_free(addr);
08307563
PM
1130}
1131
7f5616f5
RH
1132static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1133 int index, TCGMemOp opc)
1134{
2aeba0d0
JS
1135 TCGv addr;
1136
1137 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1138 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1139 opc |= MO_ALIGN;
1140 }
1141
1142 addr = gen_aa32_addr(s, a32, opc);
7f5616f5
RH
1143 tcg_gen_qemu_st_i32(val, addr, index, opc);
1144 tcg_temp_free(addr);
1145}
08307563 1146
7f5616f5 1147#define DO_GEN_LD(SUFF, OPC) \
12dcc321 1148static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
7f5616f5 1149 TCGv_i32 a32, int index) \
08307563 1150{ \
7f5616f5 1151 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
9bb6558a
PM
1152} \
1153static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1154 TCGv_i32 val, \
1155 TCGv_i32 a32, int index, \
1156 ISSInfo issinfo) \
1157{ \
1158 gen_aa32_ld##SUFF(s, val, a32, index); \
1159 disas_set_da_iss(s, OPC, issinfo); \
08307563
PM
1160}
1161
7f5616f5 1162#define DO_GEN_ST(SUFF, OPC) \
12dcc321 1163static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
7f5616f5 1164 TCGv_i32 a32, int index) \
08307563 1165{ \
7f5616f5 1166 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
9bb6558a
PM
1167} \
1168static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1169 TCGv_i32 val, \
1170 TCGv_i32 a32, int index, \
1171 ISSInfo issinfo) \
1172{ \
1173 gen_aa32_st##SUFF(s, val, a32, index); \
1174 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
08307563
PM
1175}
1176
7f5616f5 1177static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
08307563 1178{
e334bd31
PB
1179 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1180 if (!IS_USER_ONLY && s->sctlr_b) {
1181 tcg_gen_rotri_i64(val, val, 32);
1182 }
08307563
PM
1183}
1184
7f5616f5
RH
1185static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1186 int index, TCGMemOp opc)
08307563 1187{
7f5616f5
RH
1188 TCGv addr = gen_aa32_addr(s, a32, opc);
1189 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1190 gen_aa32_frob64(s, val);
1191 tcg_temp_free(addr);
1192}
1193
1194static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1195 TCGv_i32 a32, int index)
1196{
1197 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1198}
1199
1200static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1201 int index, TCGMemOp opc)
1202{
1203 TCGv addr = gen_aa32_addr(s, a32, opc);
e334bd31
PB
1204
1205 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1206 if (!IS_USER_ONLY && s->sctlr_b) {
7f5616f5 1207 TCGv_i64 tmp = tcg_temp_new_i64();
e334bd31 1208 tcg_gen_rotri_i64(tmp, val, 32);
7f5616f5
RH
1209 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1210 tcg_temp_free_i64(tmp);
e334bd31 1211 } else {
7f5616f5 1212 tcg_gen_qemu_st_i64(val, addr, index, opc);
e334bd31 1213 }
7f5616f5 1214 tcg_temp_free(addr);
08307563
PM
1215}
1216
7f5616f5
RH
1217static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1218 TCGv_i32 a32, int index)
1219{
1220 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1221}
08307563 1222
7f5616f5
RH
1223DO_GEN_LD(8s, MO_SB)
1224DO_GEN_LD(8u, MO_UB)
1225DO_GEN_LD(16s, MO_SW)
1226DO_GEN_LD(16u, MO_UW)
1227DO_GEN_LD(32u, MO_UL)
7f5616f5
RH
1228DO_GEN_ST(8, MO_UB)
1229DO_GEN_ST(16, MO_UW)
1230DO_GEN_ST(32, MO_UL)
08307563 1231
37e6456e
PM
1232static inline void gen_hvc(DisasContext *s, int imm16)
1233{
1234 /* The pre HVC helper handles cases when HVC gets trapped
1235 * as an undefined insn by runtime configuration (ie before
1236 * the insn really executes).
1237 */
1238 gen_set_pc_im(s, s->pc - 4);
1239 gen_helper_pre_hvc(cpu_env);
1240 /* Otherwise we will treat this as a real exception which
1241 * happens after execution of the insn. (The distinction matters
1242 * for the PC value reported to the exception handler and also
1243 * for single stepping.)
1244 */
1245 s->svc_imm = imm16;
1246 gen_set_pc_im(s, s->pc);
dcba3a8d 1247 s->base.is_jmp = DISAS_HVC;
37e6456e
PM
1248}
1249
1250static inline void gen_smc(DisasContext *s)
1251{
1252 /* As with HVC, we may take an exception either before or after
1253 * the insn executes.
1254 */
1255 TCGv_i32 tmp;
1256
1257 gen_set_pc_im(s, s->pc - 4);
1258 tmp = tcg_const_i32(syn_aa32_smc());
1259 gen_helper_pre_smc(cpu_env, tmp);
1260 tcg_temp_free_i32(tmp);
1261 gen_set_pc_im(s, s->pc);
dcba3a8d 1262 s->base.is_jmp = DISAS_SMC;
37e6456e
PM
1263}
1264
d4a2dc67
PM
1265static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1266{
1267 gen_set_condexec(s);
1268 gen_set_pc_im(s, s->pc - offset);
1269 gen_exception_internal(excp);
dcba3a8d 1270 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1271}
1272
73710361
GB
1273static void gen_exception_insn(DisasContext *s, int offset, int excp,
1274 int syn, uint32_t target_el)
d4a2dc67
PM
1275{
1276 gen_set_condexec(s);
1277 gen_set_pc_im(s, s->pc - offset);
73710361 1278 gen_exception(excp, syn, target_el);
dcba3a8d 1279 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1280}
1281
c900a2e6
PM
1282static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
1283{
1284 TCGv_i32 tcg_syn;
1285
1286 gen_set_condexec(s);
1287 gen_set_pc_im(s, s->pc - offset);
1288 tcg_syn = tcg_const_i32(syn);
1289 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1290 tcg_temp_free_i32(tcg_syn);
1291 s->base.is_jmp = DISAS_NORETURN;
1292}
1293
b5ff1b31
FB
1294/* Force a TB lookup after an instruction that changes the CPU state. */
1295static inline void gen_lookup_tb(DisasContext *s)
1296{
a6445c52 1297 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
dcba3a8d 1298 s->base.is_jmp = DISAS_EXIT;
b5ff1b31
FB
1299}
1300
19a6e31c
PM
1301static inline void gen_hlt(DisasContext *s, int imm)
1302{
1303 /* HLT. This has two purposes.
1304 * Architecturally, it is an external halting debug instruction.
1305 * Since QEMU doesn't implement external debug, we treat this as
1306 * it is required for halting debug disabled: it will UNDEF.
1307 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1308 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1309 * must trigger semihosting even for ARMv7 and earlier, where
1310 * HLT was an undefined encoding.
1311 * In system mode, we don't allow userspace access to
1312 * semihosting, to provide some semblance of security
1313 * (and for consistency with our 32-bit semihosting).
1314 */
1315 if (semihosting_enabled() &&
1316#ifndef CONFIG_USER_ONLY
1317 s->current_el != 0 &&
1318#endif
1319 (imm == (s->thumb ? 0x3c : 0xf000))) {
1320 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1321 return;
1322 }
1323
1324 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1325 default_exception_el(s));
1326}
1327
b0109805 1328static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1329 TCGv_i32 var)
2c0262af 1330{
1e8d4eec 1331 int val, rm, shift, shiftop;
39d5492a 1332 TCGv_i32 offset;
2c0262af
FB
1333
1334 if (!(insn & (1 << 25))) {
1335 /* immediate */
1336 val = insn & 0xfff;
1337 if (!(insn & (1 << 23)))
1338 val = -val;
537730b9 1339 if (val != 0)
b0109805 1340 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1341 } else {
1342 /* shift/register */
1343 rm = (insn) & 0xf;
1344 shift = (insn >> 7) & 0x1f;
1e8d4eec 1345 shiftop = (insn >> 5) & 3;
b26eefb6 1346 offset = load_reg(s, rm);
9a119ff6 1347 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1348 if (!(insn & (1 << 23)))
b0109805 1349 tcg_gen_sub_i32(var, var, offset);
2c0262af 1350 else
b0109805 1351 tcg_gen_add_i32(var, var, offset);
7d1b0095 1352 tcg_temp_free_i32(offset);
2c0262af
FB
1353 }
1354}
1355
191f9a93 1356static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1357 int extra, TCGv_i32 var)
2c0262af
FB
1358{
1359 int val, rm;
39d5492a 1360 TCGv_i32 offset;
3b46e624 1361
2c0262af
FB
1362 if (insn & (1 << 22)) {
1363 /* immediate */
1364 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1365 if (!(insn & (1 << 23)))
1366 val = -val;
18acad92 1367 val += extra;
537730b9 1368 if (val != 0)
b0109805 1369 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1370 } else {
1371 /* register */
191f9a93 1372 if (extra)
b0109805 1373 tcg_gen_addi_i32(var, var, extra);
2c0262af 1374 rm = (insn) & 0xf;
b26eefb6 1375 offset = load_reg(s, rm);
2c0262af 1376 if (!(insn & (1 << 23)))
b0109805 1377 tcg_gen_sub_i32(var, var, offset);
2c0262af 1378 else
b0109805 1379 tcg_gen_add_i32(var, var, offset);
7d1b0095 1380 tcg_temp_free_i32(offset);
2c0262af
FB
1381 }
1382}
1383
5aaebd13
PM
1384static TCGv_ptr get_fpstatus_ptr(int neon)
1385{
1386 TCGv_ptr statusptr = tcg_temp_new_ptr();
1387 int offset;
1388 if (neon) {
0ecb72a5 1389 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1390 } else {
0ecb72a5 1391 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1392 }
1393 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1394 return statusptr;
1395}
1396
4373f3ce
PB
1397#define VFP_OP2(name) \
1398static inline void gen_vfp_##name(int dp) \
1399{ \
ae1857ec
PM
1400 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1401 if (dp) { \
1402 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1403 } else { \
1404 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1405 } \
1406 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
1407}
1408
4373f3ce
PB
1409VFP_OP2(add)
1410VFP_OP2(sub)
1411VFP_OP2(mul)
1412VFP_OP2(div)
1413
1414#undef VFP_OP2
1415
605a6aed
PM
1416static inline void gen_vfp_F1_mul(int dp)
1417{
1418 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1419 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1420 if (dp) {
ae1857ec 1421 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1422 } else {
ae1857ec 1423 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1424 }
ae1857ec 1425 tcg_temp_free_ptr(fpst);
605a6aed
PM
1426}
1427
1428static inline void gen_vfp_F1_neg(int dp)
1429{
1430 /* Like gen_vfp_neg() but put result in F1 */
1431 if (dp) {
1432 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1433 } else {
1434 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1435 }
1436}
1437
4373f3ce
PB
1438static inline void gen_vfp_abs(int dp)
1439{
1440 if (dp)
1441 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1442 else
1443 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1444}
1445
1446static inline void gen_vfp_neg(int dp)
1447{
1448 if (dp)
1449 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1450 else
1451 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1452}
1453
1454static inline void gen_vfp_sqrt(int dp)
1455{
1456 if (dp)
1457 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1458 else
1459 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1460}
1461
1462static inline void gen_vfp_cmp(int dp)
1463{
1464 if (dp)
1465 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1466 else
1467 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1468}
1469
1470static inline void gen_vfp_cmpe(int dp)
1471{
1472 if (dp)
1473 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1474 else
1475 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1476}
1477
1478static inline void gen_vfp_F1_ld0(int dp)
1479{
1480 if (dp)
5b340b51 1481 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1482 else
5b340b51 1483 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1484}
1485
5500b06c
PM
1486#define VFP_GEN_ITOF(name) \
1487static inline void gen_vfp_##name(int dp, int neon) \
1488{ \
5aaebd13 1489 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1490 if (dp) { \
1491 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1492 } else { \
1493 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1494 } \
b7fa9214 1495 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1496}
1497
5500b06c
PM
1498VFP_GEN_ITOF(uito)
1499VFP_GEN_ITOF(sito)
1500#undef VFP_GEN_ITOF
4373f3ce 1501
5500b06c
PM
1502#define VFP_GEN_FTOI(name) \
1503static inline void gen_vfp_##name(int dp, int neon) \
1504{ \
5aaebd13 1505 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1506 if (dp) { \
1507 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1508 } else { \
1509 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1510 } \
b7fa9214 1511 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1512}
1513
5500b06c
PM
1514VFP_GEN_FTOI(toui)
1515VFP_GEN_FTOI(touiz)
1516VFP_GEN_FTOI(tosi)
1517VFP_GEN_FTOI(tosiz)
1518#undef VFP_GEN_FTOI
4373f3ce 1519
16d5b3ca 1520#define VFP_GEN_FIX(name, round) \
5500b06c 1521static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1522{ \
39d5492a 1523 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
5aaebd13 1524 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c 1525 if (dp) { \
16d5b3ca
WN
1526 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1527 statusptr); \
5500b06c 1528 } else { \
16d5b3ca
WN
1529 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1530 statusptr); \
5500b06c 1531 } \
b75263d6 1532 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1533 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1534}
16d5b3ca
WN
1535VFP_GEN_FIX(tosh, _round_to_zero)
1536VFP_GEN_FIX(tosl, _round_to_zero)
1537VFP_GEN_FIX(touh, _round_to_zero)
1538VFP_GEN_FIX(toul, _round_to_zero)
1539VFP_GEN_FIX(shto, )
1540VFP_GEN_FIX(slto, )
1541VFP_GEN_FIX(uhto, )
1542VFP_GEN_FIX(ulto, )
4373f3ce 1543#undef VFP_GEN_FIX
9ee6e8bb 1544
39d5492a 1545static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1546{
08307563 1547 if (dp) {
12dcc321 1548 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1549 } else {
12dcc321 1550 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
08307563 1551 }
b5ff1b31
FB
1552}
1553
39d5492a 1554static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1555{
08307563 1556 if (dp) {
12dcc321 1557 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1558 } else {
12dcc321 1559 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
08307563 1560 }
b5ff1b31
FB
1561}
1562
c39c2b90 1563static inline long vfp_reg_offset(bool dp, unsigned reg)
8e96005d 1564{
9a2b5256 1565 if (dp) {
c39c2b90 1566 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
8e96005d 1567 } else {
c39c2b90 1568 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
9a2b5256
RH
1569 if (reg & 1) {
1570 ofs += offsetof(CPU_DoubleU, l.upper);
1571 } else {
1572 ofs += offsetof(CPU_DoubleU, l.lower);
1573 }
1574 return ofs;
8e96005d
FB
1575 }
1576}
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1587
39d5492a 1588static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1589{
39d5492a 1590 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1591 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1592 return tmp;
1593}
1594
39d5492a 1595static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1596{
1597 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1598 tcg_temp_free_i32(var);
8f8e3aa4
PB
1599}
1600
a7812ae4 1601static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1602{
1603 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1604}
1605
a7812ae4 1606static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1607{
1608 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1609}
1610
1a66ac61
RH
1611static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1612{
1613 TCGv_ptr ret = tcg_temp_new_ptr();
1614 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1615 return ret;
1616}
1617
4373f3ce
PB
1618#define tcg_gen_ld_f32 tcg_gen_ld_i32
1619#define tcg_gen_ld_f64 tcg_gen_ld_i64
1620#define tcg_gen_st_f32 tcg_gen_st_i32
1621#define tcg_gen_st_f64 tcg_gen_st_i64
1622
b7bcbe95
FB
1623static inline void gen_mov_F0_vreg(int dp, int reg)
1624{
1625 if (dp)
4373f3ce 1626 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1627 else
4373f3ce 1628 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1629}
1630
1631static inline void gen_mov_F1_vreg(int dp, int reg)
1632{
1633 if (dp)
4373f3ce 1634 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1635 else
4373f3ce 1636 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1637}
1638
1639static inline void gen_mov_vreg_F0(int dp, int reg)
1640{
1641 if (dp)
4373f3ce 1642 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1643 else
4373f3ce 1644 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1645}
1646
d00584b7 1647#define ARM_CP_RW_BIT (1 << 20)
18c9b560 1648
a7812ae4 1649static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1650{
0ecb72a5 1651 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1652}
1653
a7812ae4 1654static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1655{
0ecb72a5 1656 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1657}
1658
39d5492a 1659static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1660{
39d5492a 1661 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1662 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1663 return var;
e677137d
PB
1664}
1665
39d5492a 1666static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1667{
0ecb72a5 1668 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1669 tcg_temp_free_i32(var);
e677137d
PB
1670}
1671
1672static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1673{
1674 iwmmxt_store_reg(cpu_M0, rn);
1675}
1676
1677static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1678{
1679 iwmmxt_load_reg(cpu_M0, rn);
1680}
1681
1682static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1683{
1684 iwmmxt_load_reg(cpu_V1, rn);
1685 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1686}
1687
1688static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1689{
1690 iwmmxt_load_reg(cpu_V1, rn);
1691 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1692}
1693
1694static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1695{
1696 iwmmxt_load_reg(cpu_V1, rn);
1697 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1698}
1699
1700#define IWMMXT_OP(name) \
1701static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1702{ \
1703 iwmmxt_load_reg(cpu_V1, rn); \
1704 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1705}
1706
477955bd
PM
1707#define IWMMXT_OP_ENV(name) \
1708static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1709{ \
1710 iwmmxt_load_reg(cpu_V1, rn); \
1711 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1712}
1713
1714#define IWMMXT_OP_ENV_SIZE(name) \
1715IWMMXT_OP_ENV(name##b) \
1716IWMMXT_OP_ENV(name##w) \
1717IWMMXT_OP_ENV(name##l)
e677137d 1718
477955bd 1719#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1720static inline void gen_op_iwmmxt_##name##_M0(void) \
1721{ \
477955bd 1722 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1723}
1724
1725IWMMXT_OP(maddsq)
1726IWMMXT_OP(madduq)
1727IWMMXT_OP(sadb)
1728IWMMXT_OP(sadw)
1729IWMMXT_OP(mulslw)
1730IWMMXT_OP(mulshw)
1731IWMMXT_OP(mululw)
1732IWMMXT_OP(muluhw)
1733IWMMXT_OP(macsw)
1734IWMMXT_OP(macuw)
1735
477955bd
PM
1736IWMMXT_OP_ENV_SIZE(unpackl)
1737IWMMXT_OP_ENV_SIZE(unpackh)
1738
1739IWMMXT_OP_ENV1(unpacklub)
1740IWMMXT_OP_ENV1(unpackluw)
1741IWMMXT_OP_ENV1(unpacklul)
1742IWMMXT_OP_ENV1(unpackhub)
1743IWMMXT_OP_ENV1(unpackhuw)
1744IWMMXT_OP_ENV1(unpackhul)
1745IWMMXT_OP_ENV1(unpacklsb)
1746IWMMXT_OP_ENV1(unpacklsw)
1747IWMMXT_OP_ENV1(unpacklsl)
1748IWMMXT_OP_ENV1(unpackhsb)
1749IWMMXT_OP_ENV1(unpackhsw)
1750IWMMXT_OP_ENV1(unpackhsl)
1751
1752IWMMXT_OP_ENV_SIZE(cmpeq)
1753IWMMXT_OP_ENV_SIZE(cmpgtu)
1754IWMMXT_OP_ENV_SIZE(cmpgts)
1755
1756IWMMXT_OP_ENV_SIZE(mins)
1757IWMMXT_OP_ENV_SIZE(minu)
1758IWMMXT_OP_ENV_SIZE(maxs)
1759IWMMXT_OP_ENV_SIZE(maxu)
1760
1761IWMMXT_OP_ENV_SIZE(subn)
1762IWMMXT_OP_ENV_SIZE(addn)
1763IWMMXT_OP_ENV_SIZE(subu)
1764IWMMXT_OP_ENV_SIZE(addu)
1765IWMMXT_OP_ENV_SIZE(subs)
1766IWMMXT_OP_ENV_SIZE(adds)
1767
1768IWMMXT_OP_ENV(avgb0)
1769IWMMXT_OP_ENV(avgb1)
1770IWMMXT_OP_ENV(avgw0)
1771IWMMXT_OP_ENV(avgw1)
e677137d 1772
477955bd
PM
1773IWMMXT_OP_ENV(packuw)
1774IWMMXT_OP_ENV(packul)
1775IWMMXT_OP_ENV(packuq)
1776IWMMXT_OP_ENV(packsw)
1777IWMMXT_OP_ENV(packsl)
1778IWMMXT_OP_ENV(packsq)
e677137d 1779
e677137d
PB
1780static void gen_op_iwmmxt_set_mup(void)
1781{
39d5492a 1782 TCGv_i32 tmp;
e677137d
PB
1783 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1784 tcg_gen_ori_i32(tmp, tmp, 2);
1785 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1786}
1787
1788static void gen_op_iwmmxt_set_cup(void)
1789{
39d5492a 1790 TCGv_i32 tmp;
e677137d
PB
1791 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1792 tcg_gen_ori_i32(tmp, tmp, 1);
1793 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1794}
1795
1796static void gen_op_iwmmxt_setpsr_nz(void)
1797{
39d5492a 1798 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1799 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1800 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1801}
1802
1803static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1804{
1805 iwmmxt_load_reg(cpu_V1, rn);
86831435 1806 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1807 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1808}
1809
39d5492a
PM
1810static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1811 TCGv_i32 dest)
18c9b560
AZ
1812{
1813 int rd;
1814 uint32_t offset;
39d5492a 1815 TCGv_i32 tmp;
18c9b560
AZ
1816
1817 rd = (insn >> 16) & 0xf;
da6b5335 1818 tmp = load_reg(s, rd);
18c9b560
AZ
1819
1820 offset = (insn & 0xff) << ((insn >> 7) & 2);
1821 if (insn & (1 << 24)) {
1822 /* Pre indexed */
1823 if (insn & (1 << 23))
da6b5335 1824 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1825 else
da6b5335
FN
1826 tcg_gen_addi_i32(tmp, tmp, -offset);
1827 tcg_gen_mov_i32(dest, tmp);
18c9b560 1828 if (insn & (1 << 21))
da6b5335
FN
1829 store_reg(s, rd, tmp);
1830 else
7d1b0095 1831 tcg_temp_free_i32(tmp);
18c9b560
AZ
1832 } else if (insn & (1 << 21)) {
1833 /* Post indexed */
da6b5335 1834 tcg_gen_mov_i32(dest, tmp);
18c9b560 1835 if (insn & (1 << 23))
da6b5335 1836 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1837 else
da6b5335
FN
1838 tcg_gen_addi_i32(tmp, tmp, -offset);
1839 store_reg(s, rd, tmp);
18c9b560
AZ
1840 } else if (!(insn & (1 << 23)))
1841 return 1;
1842 return 0;
1843}
1844
39d5492a 1845static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1846{
1847 int rd = (insn >> 0) & 0xf;
39d5492a 1848 TCGv_i32 tmp;
18c9b560 1849
da6b5335
FN
1850 if (insn & (1 << 8)) {
1851 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1852 return 1;
da6b5335
FN
1853 } else {
1854 tmp = iwmmxt_load_creg(rd);
1855 }
1856 } else {
7d1b0095 1857 tmp = tcg_temp_new_i32();
da6b5335 1858 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1859 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1860 }
1861 tcg_gen_andi_i32(tmp, tmp, mask);
1862 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1863 tcg_temp_free_i32(tmp);
18c9b560
AZ
1864 return 0;
1865}
1866
a1c7273b 1867/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1868 (ie. an undefined instruction). */
7dcc1f89 1869static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1870{
1871 int rd, wrd;
1872 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1873 TCGv_i32 addr;
1874 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1875
1876 if ((insn & 0x0e000e00) == 0x0c000000) {
1877 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1878 wrd = insn & 0xf;
1879 rdlo = (insn >> 12) & 0xf;
1880 rdhi = (insn >> 16) & 0xf;
d00584b7 1881 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1882 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1883 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1884 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1885 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
d00584b7 1886 } else { /* TMCRR */
da6b5335
FN
1887 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1888 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1889 gen_op_iwmmxt_set_mup();
1890 }
1891 return 0;
1892 }
1893
1894 wrd = (insn >> 12) & 0xf;
7d1b0095 1895 addr = tcg_temp_new_i32();
da6b5335 1896 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1897 tcg_temp_free_i32(addr);
18c9b560 1898 return 1;
da6b5335 1899 }
18c9b560 1900 if (insn & ARM_CP_RW_BIT) {
d00584b7 1901 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1902 tmp = tcg_temp_new_i32();
12dcc321 1903 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1904 iwmmxt_store_creg(wrd, tmp);
18c9b560 1905 } else {
e677137d
PB
1906 i = 1;
1907 if (insn & (1 << 8)) {
d00584b7 1908 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1909 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1910 i = 0;
d00584b7 1911 } else { /* WLDRW wRd */
29531141 1912 tmp = tcg_temp_new_i32();
12dcc321 1913 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1914 }
1915 } else {
29531141 1916 tmp = tcg_temp_new_i32();
d00584b7 1917 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1918 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
d00584b7 1919 } else { /* WLDRB */
12dcc321 1920 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1921 }
1922 }
1923 if (i) {
1924 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1925 tcg_temp_free_i32(tmp);
e677137d 1926 }
18c9b560
AZ
1927 gen_op_iwmmxt_movq_wRn_M0(wrd);
1928 }
1929 } else {
d00584b7 1930 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1931 tmp = iwmmxt_load_creg(wrd);
12dcc321 1932 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1933 } else {
1934 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1935 tmp = tcg_temp_new_i32();
e677137d 1936 if (insn & (1 << 8)) {
d00584b7 1937 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1938 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
d00584b7 1939 } else { /* WSTRW wRd */
ecc7b3aa 1940 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1941 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1942 }
1943 } else {
d00584b7 1944 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1945 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1946 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
d00584b7 1947 } else { /* WSTRB */
ecc7b3aa 1948 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1949 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1950 }
1951 }
18c9b560 1952 }
29531141 1953 tcg_temp_free_i32(tmp);
18c9b560 1954 }
7d1b0095 1955 tcg_temp_free_i32(addr);
18c9b560
AZ
1956 return 0;
1957 }
1958
1959 if ((insn & 0x0f000000) != 0x0e000000)
1960 return 1;
1961
1962 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
d00584b7 1963 case 0x000: /* WOR */
18c9b560
AZ
1964 wrd = (insn >> 12) & 0xf;
1965 rd0 = (insn >> 0) & 0xf;
1966 rd1 = (insn >> 16) & 0xf;
1967 gen_op_iwmmxt_movq_M0_wRn(rd0);
1968 gen_op_iwmmxt_orq_M0_wRn(rd1);
1969 gen_op_iwmmxt_setpsr_nz();
1970 gen_op_iwmmxt_movq_wRn_M0(wrd);
1971 gen_op_iwmmxt_set_mup();
1972 gen_op_iwmmxt_set_cup();
1973 break;
d00584b7 1974 case 0x011: /* TMCR */
18c9b560
AZ
1975 if (insn & 0xf)
1976 return 1;
1977 rd = (insn >> 12) & 0xf;
1978 wrd = (insn >> 16) & 0xf;
1979 switch (wrd) {
1980 case ARM_IWMMXT_wCID:
1981 case ARM_IWMMXT_wCASF:
1982 break;
1983 case ARM_IWMMXT_wCon:
1984 gen_op_iwmmxt_set_cup();
1985 /* Fall through. */
1986 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1987 tmp = iwmmxt_load_creg(wrd);
1988 tmp2 = load_reg(s, rd);
f669df27 1989 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1990 tcg_temp_free_i32(tmp2);
da6b5335 1991 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1992 break;
1993 case ARM_IWMMXT_wCGR0:
1994 case ARM_IWMMXT_wCGR1:
1995 case ARM_IWMMXT_wCGR2:
1996 case ARM_IWMMXT_wCGR3:
1997 gen_op_iwmmxt_set_cup();
da6b5335
FN
1998 tmp = load_reg(s, rd);
1999 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
2000 break;
2001 default:
2002 return 1;
2003 }
2004 break;
d00584b7 2005 case 0x100: /* WXOR */
18c9b560
AZ
2006 wrd = (insn >> 12) & 0xf;
2007 rd0 = (insn >> 0) & 0xf;
2008 rd1 = (insn >> 16) & 0xf;
2009 gen_op_iwmmxt_movq_M0_wRn(rd0);
2010 gen_op_iwmmxt_xorq_M0_wRn(rd1);
2011 gen_op_iwmmxt_setpsr_nz();
2012 gen_op_iwmmxt_movq_wRn_M0(wrd);
2013 gen_op_iwmmxt_set_mup();
2014 gen_op_iwmmxt_set_cup();
2015 break;
d00584b7 2016 case 0x111: /* TMRC */
18c9b560
AZ
2017 if (insn & 0xf)
2018 return 1;
2019 rd = (insn >> 12) & 0xf;
2020 wrd = (insn >> 16) & 0xf;
da6b5335
FN
2021 tmp = iwmmxt_load_creg(wrd);
2022 store_reg(s, rd, tmp);
18c9b560 2023 break;
d00584b7 2024 case 0x300: /* WANDN */
18c9b560
AZ
2025 wrd = (insn >> 12) & 0xf;
2026 rd0 = (insn >> 0) & 0xf;
2027 rd1 = (insn >> 16) & 0xf;
2028 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 2029 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
2030 gen_op_iwmmxt_andq_M0_wRn(rd1);
2031 gen_op_iwmmxt_setpsr_nz();
2032 gen_op_iwmmxt_movq_wRn_M0(wrd);
2033 gen_op_iwmmxt_set_mup();
2034 gen_op_iwmmxt_set_cup();
2035 break;
d00584b7 2036 case 0x200: /* WAND */
18c9b560
AZ
2037 wrd = (insn >> 12) & 0xf;
2038 rd0 = (insn >> 0) & 0xf;
2039 rd1 = (insn >> 16) & 0xf;
2040 gen_op_iwmmxt_movq_M0_wRn(rd0);
2041 gen_op_iwmmxt_andq_M0_wRn(rd1);
2042 gen_op_iwmmxt_setpsr_nz();
2043 gen_op_iwmmxt_movq_wRn_M0(wrd);
2044 gen_op_iwmmxt_set_mup();
2045 gen_op_iwmmxt_set_cup();
2046 break;
d00584b7 2047 case 0x810: case 0xa10: /* WMADD */
18c9b560
AZ
2048 wrd = (insn >> 12) & 0xf;
2049 rd0 = (insn >> 0) & 0xf;
2050 rd1 = (insn >> 16) & 0xf;
2051 gen_op_iwmmxt_movq_M0_wRn(rd0);
2052 if (insn & (1 << 21))
2053 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2054 else
2055 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2056 gen_op_iwmmxt_movq_wRn_M0(wrd);
2057 gen_op_iwmmxt_set_mup();
2058 break;
d00584b7 2059 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
18c9b560
AZ
2060 wrd = (insn >> 12) & 0xf;
2061 rd0 = (insn >> 16) & 0xf;
2062 rd1 = (insn >> 0) & 0xf;
2063 gen_op_iwmmxt_movq_M0_wRn(rd0);
2064 switch ((insn >> 22) & 3) {
2065 case 0:
2066 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2067 break;
2068 case 1:
2069 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2070 break;
2071 case 2:
2072 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2073 break;
2074 case 3:
2075 return 1;
2076 }
2077 gen_op_iwmmxt_movq_wRn_M0(wrd);
2078 gen_op_iwmmxt_set_mup();
2079 gen_op_iwmmxt_set_cup();
2080 break;
d00584b7 2081 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
18c9b560
AZ
2082 wrd = (insn >> 12) & 0xf;
2083 rd0 = (insn >> 16) & 0xf;
2084 rd1 = (insn >> 0) & 0xf;
2085 gen_op_iwmmxt_movq_M0_wRn(rd0);
2086 switch ((insn >> 22) & 3) {
2087 case 0:
2088 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2089 break;
2090 case 1:
2091 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2092 break;
2093 case 2:
2094 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2095 break;
2096 case 3:
2097 return 1;
2098 }
2099 gen_op_iwmmxt_movq_wRn_M0(wrd);
2100 gen_op_iwmmxt_set_mup();
2101 gen_op_iwmmxt_set_cup();
2102 break;
d00584b7 2103 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
18c9b560
AZ
2104 wrd = (insn >> 12) & 0xf;
2105 rd0 = (insn >> 16) & 0xf;
2106 rd1 = (insn >> 0) & 0xf;
2107 gen_op_iwmmxt_movq_M0_wRn(rd0);
2108 if (insn & (1 << 22))
2109 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2110 else
2111 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2112 if (!(insn & (1 << 20)))
2113 gen_op_iwmmxt_addl_M0_wRn(wrd);
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 break;
d00584b7 2117 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
18c9b560
AZ
2118 wrd = (insn >> 12) & 0xf;
2119 rd0 = (insn >> 16) & 0xf;
2120 rd1 = (insn >> 0) & 0xf;
2121 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2122 if (insn & (1 << 21)) {
2123 if (insn & (1 << 20))
2124 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2125 else
2126 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2127 } else {
2128 if (insn & (1 << 20))
2129 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2130 else
2131 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2132 }
18c9b560
AZ
2133 gen_op_iwmmxt_movq_wRn_M0(wrd);
2134 gen_op_iwmmxt_set_mup();
2135 break;
d00584b7 2136 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
18c9b560
AZ
2137 wrd = (insn >> 12) & 0xf;
2138 rd0 = (insn >> 16) & 0xf;
2139 rd1 = (insn >> 0) & 0xf;
2140 gen_op_iwmmxt_movq_M0_wRn(rd0);
2141 if (insn & (1 << 21))
2142 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2143 else
2144 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2145 if (!(insn & (1 << 20))) {
e677137d
PB
2146 iwmmxt_load_reg(cpu_V1, wrd);
2147 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2148 }
2149 gen_op_iwmmxt_movq_wRn_M0(wrd);
2150 gen_op_iwmmxt_set_mup();
2151 break;
d00584b7 2152 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
18c9b560
AZ
2153 wrd = (insn >> 12) & 0xf;
2154 rd0 = (insn >> 16) & 0xf;
2155 rd1 = (insn >> 0) & 0xf;
2156 gen_op_iwmmxt_movq_M0_wRn(rd0);
2157 switch ((insn >> 22) & 3) {
2158 case 0:
2159 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2160 break;
2161 case 1:
2162 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2163 break;
2164 case 2:
2165 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2166 break;
2167 case 3:
2168 return 1;
2169 }
2170 gen_op_iwmmxt_movq_wRn_M0(wrd);
2171 gen_op_iwmmxt_set_mup();
2172 gen_op_iwmmxt_set_cup();
2173 break;
d00584b7 2174 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
18c9b560
AZ
2175 wrd = (insn >> 12) & 0xf;
2176 rd0 = (insn >> 16) & 0xf;
2177 rd1 = (insn >> 0) & 0xf;
2178 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2179 if (insn & (1 << 22)) {
2180 if (insn & (1 << 20))
2181 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2182 else
2183 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2184 } else {
2185 if (insn & (1 << 20))
2186 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2187 else
2188 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2189 }
18c9b560
AZ
2190 gen_op_iwmmxt_movq_wRn_M0(wrd);
2191 gen_op_iwmmxt_set_mup();
2192 gen_op_iwmmxt_set_cup();
2193 break;
d00584b7 2194 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
18c9b560
AZ
2195 wrd = (insn >> 12) & 0xf;
2196 rd0 = (insn >> 16) & 0xf;
2197 rd1 = (insn >> 0) & 0xf;
2198 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2199 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2200 tcg_gen_andi_i32(tmp, tmp, 7);
2201 iwmmxt_load_reg(cpu_V1, rd1);
2202 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2203 tcg_temp_free_i32(tmp);
18c9b560
AZ
2204 gen_op_iwmmxt_movq_wRn_M0(wrd);
2205 gen_op_iwmmxt_set_mup();
2206 break;
d00584b7 2207 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2208 if (((insn >> 6) & 3) == 3)
2209 return 1;
18c9b560
AZ
2210 rd = (insn >> 12) & 0xf;
2211 wrd = (insn >> 16) & 0xf;
da6b5335 2212 tmp = load_reg(s, rd);
18c9b560
AZ
2213 gen_op_iwmmxt_movq_M0_wRn(wrd);
2214 switch ((insn >> 6) & 3) {
2215 case 0:
da6b5335
FN
2216 tmp2 = tcg_const_i32(0xff);
2217 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2218 break;
2219 case 1:
da6b5335
FN
2220 tmp2 = tcg_const_i32(0xffff);
2221 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2222 break;
2223 case 2:
da6b5335
FN
2224 tmp2 = tcg_const_i32(0xffffffff);
2225 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2226 break;
da6b5335 2227 default:
f764718d
RH
2228 tmp2 = NULL;
2229 tmp3 = NULL;
18c9b560 2230 }
da6b5335 2231 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2232 tcg_temp_free_i32(tmp3);
2233 tcg_temp_free_i32(tmp2);
7d1b0095 2234 tcg_temp_free_i32(tmp);
18c9b560
AZ
2235 gen_op_iwmmxt_movq_wRn_M0(wrd);
2236 gen_op_iwmmxt_set_mup();
2237 break;
d00584b7 2238 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
18c9b560
AZ
2239 rd = (insn >> 12) & 0xf;
2240 wrd = (insn >> 16) & 0xf;
da6b5335 2241 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2242 return 1;
2243 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2244 tmp = tcg_temp_new_i32();
18c9b560
AZ
2245 switch ((insn >> 22) & 3) {
2246 case 0:
da6b5335 2247 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2248 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2249 if (insn & 8) {
2250 tcg_gen_ext8s_i32(tmp, tmp);
2251 } else {
2252 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2253 }
2254 break;
2255 case 1:
da6b5335 2256 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2257 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2258 if (insn & 8) {
2259 tcg_gen_ext16s_i32(tmp, tmp);
2260 } else {
2261 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2262 }
2263 break;
2264 case 2:
da6b5335 2265 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2266 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2267 break;
18c9b560 2268 }
da6b5335 2269 store_reg(s, rd, tmp);
18c9b560 2270 break;
d00584b7 2271 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2272 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2273 return 1;
da6b5335 2274 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2275 switch ((insn >> 22) & 3) {
2276 case 0:
da6b5335 2277 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2278 break;
2279 case 1:
da6b5335 2280 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2281 break;
2282 case 2:
da6b5335 2283 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2284 break;
18c9b560 2285 }
da6b5335
FN
2286 tcg_gen_shli_i32(tmp, tmp, 28);
2287 gen_set_nzcv(tmp);
7d1b0095 2288 tcg_temp_free_i32(tmp);
18c9b560 2289 break;
d00584b7 2290 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2291 if (((insn >> 6) & 3) == 3)
2292 return 1;
18c9b560
AZ
2293 rd = (insn >> 12) & 0xf;
2294 wrd = (insn >> 16) & 0xf;
da6b5335 2295 tmp = load_reg(s, rd);
18c9b560
AZ
2296 switch ((insn >> 6) & 3) {
2297 case 0:
da6b5335 2298 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2299 break;
2300 case 1:
da6b5335 2301 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2302 break;
2303 case 2:
da6b5335 2304 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2305 break;
18c9b560 2306 }
7d1b0095 2307 tcg_temp_free_i32(tmp);
18c9b560
AZ
2308 gen_op_iwmmxt_movq_wRn_M0(wrd);
2309 gen_op_iwmmxt_set_mup();
2310 break;
d00584b7 2311 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2312 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2313 return 1;
da6b5335 2314 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2315 tmp2 = tcg_temp_new_i32();
da6b5335 2316 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2317 switch ((insn >> 22) & 3) {
2318 case 0:
2319 for (i = 0; i < 7; i ++) {
da6b5335
FN
2320 tcg_gen_shli_i32(tmp2, tmp2, 4);
2321 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2322 }
2323 break;
2324 case 1:
2325 for (i = 0; i < 3; i ++) {
da6b5335
FN
2326 tcg_gen_shli_i32(tmp2, tmp2, 8);
2327 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2328 }
2329 break;
2330 case 2:
da6b5335
FN
2331 tcg_gen_shli_i32(tmp2, tmp2, 16);
2332 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2333 break;
18c9b560 2334 }
da6b5335 2335 gen_set_nzcv(tmp);
7d1b0095
PM
2336 tcg_temp_free_i32(tmp2);
2337 tcg_temp_free_i32(tmp);
18c9b560 2338 break;
d00584b7 2339 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
18c9b560
AZ
2340 wrd = (insn >> 12) & 0xf;
2341 rd0 = (insn >> 16) & 0xf;
2342 gen_op_iwmmxt_movq_M0_wRn(rd0);
2343 switch ((insn >> 22) & 3) {
2344 case 0:
e677137d 2345 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2346 break;
2347 case 1:
e677137d 2348 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2349 break;
2350 case 2:
e677137d 2351 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2352 break;
2353 case 3:
2354 return 1;
2355 }
2356 gen_op_iwmmxt_movq_wRn_M0(wrd);
2357 gen_op_iwmmxt_set_mup();
2358 break;
d00584b7 2359 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2360 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2361 return 1;
da6b5335 2362 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2363 tmp2 = tcg_temp_new_i32();
da6b5335 2364 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2365 switch ((insn >> 22) & 3) {
2366 case 0:
2367 for (i = 0; i < 7; i ++) {
da6b5335
FN
2368 tcg_gen_shli_i32(tmp2, tmp2, 4);
2369 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2370 }
2371 break;
2372 case 1:
2373 for (i = 0; i < 3; i ++) {
da6b5335
FN
2374 tcg_gen_shli_i32(tmp2, tmp2, 8);
2375 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2376 }
2377 break;
2378 case 2:
da6b5335
FN
2379 tcg_gen_shli_i32(tmp2, tmp2, 16);
2380 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2381 break;
18c9b560 2382 }
da6b5335 2383 gen_set_nzcv(tmp);
7d1b0095
PM
2384 tcg_temp_free_i32(tmp2);
2385 tcg_temp_free_i32(tmp);
18c9b560 2386 break;
d00584b7 2387 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
18c9b560
AZ
2388 rd = (insn >> 12) & 0xf;
2389 rd0 = (insn >> 16) & 0xf;
da6b5335 2390 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2391 return 1;
2392 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2393 tmp = tcg_temp_new_i32();
18c9b560
AZ
2394 switch ((insn >> 22) & 3) {
2395 case 0:
da6b5335 2396 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2397 break;
2398 case 1:
da6b5335 2399 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2400 break;
2401 case 2:
da6b5335 2402 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2403 break;
18c9b560 2404 }
da6b5335 2405 store_reg(s, rd, tmp);
18c9b560 2406 break;
d00584b7 2407 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
18c9b560
AZ
2408 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2409 wrd = (insn >> 12) & 0xf;
2410 rd0 = (insn >> 16) & 0xf;
2411 rd1 = (insn >> 0) & 0xf;
2412 gen_op_iwmmxt_movq_M0_wRn(rd0);
2413 switch ((insn >> 22) & 3) {
2414 case 0:
2415 if (insn & (1 << 21))
2416 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2417 else
2418 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2419 break;
2420 case 1:
2421 if (insn & (1 << 21))
2422 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2423 else
2424 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2425 break;
2426 case 2:
2427 if (insn & (1 << 21))
2428 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2429 else
2430 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2431 break;
2432 case 3:
2433 return 1;
2434 }
2435 gen_op_iwmmxt_movq_wRn_M0(wrd);
2436 gen_op_iwmmxt_set_mup();
2437 gen_op_iwmmxt_set_cup();
2438 break;
d00584b7 2439 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
18c9b560
AZ
2440 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2441 wrd = (insn >> 12) & 0xf;
2442 rd0 = (insn >> 16) & 0xf;
2443 gen_op_iwmmxt_movq_M0_wRn(rd0);
2444 switch ((insn >> 22) & 3) {
2445 case 0:
2446 if (insn & (1 << 21))
2447 gen_op_iwmmxt_unpacklsb_M0();
2448 else
2449 gen_op_iwmmxt_unpacklub_M0();
2450 break;
2451 case 1:
2452 if (insn & (1 << 21))
2453 gen_op_iwmmxt_unpacklsw_M0();
2454 else
2455 gen_op_iwmmxt_unpackluw_M0();
2456 break;
2457 case 2:
2458 if (insn & (1 << 21))
2459 gen_op_iwmmxt_unpacklsl_M0();
2460 else
2461 gen_op_iwmmxt_unpacklul_M0();
2462 break;
2463 case 3:
2464 return 1;
2465 }
2466 gen_op_iwmmxt_movq_wRn_M0(wrd);
2467 gen_op_iwmmxt_set_mup();
2468 gen_op_iwmmxt_set_cup();
2469 break;
d00584b7 2470 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
18c9b560
AZ
2471 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2472 wrd = (insn >> 12) & 0xf;
2473 rd0 = (insn >> 16) & 0xf;
2474 gen_op_iwmmxt_movq_M0_wRn(rd0);
2475 switch ((insn >> 22) & 3) {
2476 case 0:
2477 if (insn & (1 << 21))
2478 gen_op_iwmmxt_unpackhsb_M0();
2479 else
2480 gen_op_iwmmxt_unpackhub_M0();
2481 break;
2482 case 1:
2483 if (insn & (1 << 21))
2484 gen_op_iwmmxt_unpackhsw_M0();
2485 else
2486 gen_op_iwmmxt_unpackhuw_M0();
2487 break;
2488 case 2:
2489 if (insn & (1 << 21))
2490 gen_op_iwmmxt_unpackhsl_M0();
2491 else
2492 gen_op_iwmmxt_unpackhul_M0();
2493 break;
2494 case 3:
2495 return 1;
2496 }
2497 gen_op_iwmmxt_movq_wRn_M0(wrd);
2498 gen_op_iwmmxt_set_mup();
2499 gen_op_iwmmxt_set_cup();
2500 break;
d00584b7 2501 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
18c9b560 2502 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2503 if (((insn >> 22) & 3) == 0)
2504 return 1;
18c9b560
AZ
2505 wrd = (insn >> 12) & 0xf;
2506 rd0 = (insn >> 16) & 0xf;
2507 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2508 tmp = tcg_temp_new_i32();
da6b5335 2509 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2510 tcg_temp_free_i32(tmp);
18c9b560 2511 return 1;
da6b5335 2512 }
18c9b560 2513 switch ((insn >> 22) & 3) {
18c9b560 2514 case 1:
477955bd 2515 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2516 break;
2517 case 2:
477955bd 2518 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2519 break;
2520 case 3:
477955bd 2521 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2522 break;
2523 }
7d1b0095 2524 tcg_temp_free_i32(tmp);
18c9b560
AZ
2525 gen_op_iwmmxt_movq_wRn_M0(wrd);
2526 gen_op_iwmmxt_set_mup();
2527 gen_op_iwmmxt_set_cup();
2528 break;
d00584b7 2529 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
18c9b560 2530 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2531 if (((insn >> 22) & 3) == 0)
2532 return 1;
18c9b560
AZ
2533 wrd = (insn >> 12) & 0xf;
2534 rd0 = (insn >> 16) & 0xf;
2535 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2536 tmp = tcg_temp_new_i32();
da6b5335 2537 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2538 tcg_temp_free_i32(tmp);
18c9b560 2539 return 1;
da6b5335 2540 }
18c9b560 2541 switch ((insn >> 22) & 3) {
18c9b560 2542 case 1:
477955bd 2543 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2544 break;
2545 case 2:
477955bd 2546 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2547 break;
2548 case 3:
477955bd 2549 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2550 break;
2551 }
7d1b0095 2552 tcg_temp_free_i32(tmp);
18c9b560
AZ
2553 gen_op_iwmmxt_movq_wRn_M0(wrd);
2554 gen_op_iwmmxt_set_mup();
2555 gen_op_iwmmxt_set_cup();
2556 break;
d00584b7 2557 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
18c9b560 2558 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2559 if (((insn >> 22) & 3) == 0)
2560 return 1;
18c9b560
AZ
2561 wrd = (insn >> 12) & 0xf;
2562 rd0 = (insn >> 16) & 0xf;
2563 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2564 tmp = tcg_temp_new_i32();
da6b5335 2565 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2566 tcg_temp_free_i32(tmp);
18c9b560 2567 return 1;
da6b5335 2568 }
18c9b560 2569 switch ((insn >> 22) & 3) {
18c9b560 2570 case 1:
477955bd 2571 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2572 break;
2573 case 2:
477955bd 2574 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2575 break;
2576 case 3:
477955bd 2577 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2578 break;
2579 }
7d1b0095 2580 tcg_temp_free_i32(tmp);
18c9b560
AZ
2581 gen_op_iwmmxt_movq_wRn_M0(wrd);
2582 gen_op_iwmmxt_set_mup();
2583 gen_op_iwmmxt_set_cup();
2584 break;
d00584b7 2585 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
18c9b560 2586 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2587 if (((insn >> 22) & 3) == 0)
2588 return 1;
18c9b560
AZ
2589 wrd = (insn >> 12) & 0xf;
2590 rd0 = (insn >> 16) & 0xf;
2591 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2592 tmp = tcg_temp_new_i32();
18c9b560 2593 switch ((insn >> 22) & 3) {
18c9b560 2594 case 1:
da6b5335 2595 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2596 tcg_temp_free_i32(tmp);
18c9b560 2597 return 1;
da6b5335 2598 }
477955bd 2599 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2600 break;
2601 case 2:
da6b5335 2602 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2603 tcg_temp_free_i32(tmp);
18c9b560 2604 return 1;
da6b5335 2605 }
477955bd 2606 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2607 break;
2608 case 3:
da6b5335 2609 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2610 tcg_temp_free_i32(tmp);
18c9b560 2611 return 1;
da6b5335 2612 }
477955bd 2613 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2614 break;
2615 }
7d1b0095 2616 tcg_temp_free_i32(tmp);
18c9b560
AZ
2617 gen_op_iwmmxt_movq_wRn_M0(wrd);
2618 gen_op_iwmmxt_set_mup();
2619 gen_op_iwmmxt_set_cup();
2620 break;
d00584b7 2621 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
18c9b560
AZ
2622 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2623 wrd = (insn >> 12) & 0xf;
2624 rd0 = (insn >> 16) & 0xf;
2625 rd1 = (insn >> 0) & 0xf;
2626 gen_op_iwmmxt_movq_M0_wRn(rd0);
2627 switch ((insn >> 22) & 3) {
2628 case 0:
2629 if (insn & (1 << 21))
2630 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2631 else
2632 gen_op_iwmmxt_minub_M0_wRn(rd1);
2633 break;
2634 case 1:
2635 if (insn & (1 << 21))
2636 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2637 else
2638 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2639 break;
2640 case 2:
2641 if (insn & (1 << 21))
2642 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2643 else
2644 gen_op_iwmmxt_minul_M0_wRn(rd1);
2645 break;
2646 case 3:
2647 return 1;
2648 }
2649 gen_op_iwmmxt_movq_wRn_M0(wrd);
2650 gen_op_iwmmxt_set_mup();
2651 break;
d00584b7 2652 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
18c9b560
AZ
2653 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2654 wrd = (insn >> 12) & 0xf;
2655 rd0 = (insn >> 16) & 0xf;
2656 rd1 = (insn >> 0) & 0xf;
2657 gen_op_iwmmxt_movq_M0_wRn(rd0);
2658 switch ((insn >> 22) & 3) {
2659 case 0:
2660 if (insn & (1 << 21))
2661 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2662 else
2663 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2664 break;
2665 case 1:
2666 if (insn & (1 << 21))
2667 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2668 else
2669 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2670 break;
2671 case 2:
2672 if (insn & (1 << 21))
2673 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2674 else
2675 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2676 break;
2677 case 3:
2678 return 1;
2679 }
2680 gen_op_iwmmxt_movq_wRn_M0(wrd);
2681 gen_op_iwmmxt_set_mup();
2682 break;
d00584b7 2683 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
18c9b560
AZ
2684 case 0x402: case 0x502: case 0x602: case 0x702:
2685 wrd = (insn >> 12) & 0xf;
2686 rd0 = (insn >> 16) & 0xf;
2687 rd1 = (insn >> 0) & 0xf;
2688 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2689 tmp = tcg_const_i32((insn >> 20) & 3);
2690 iwmmxt_load_reg(cpu_V1, rd1);
2691 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2692 tcg_temp_free_i32(tmp);
18c9b560
AZ
2693 gen_op_iwmmxt_movq_wRn_M0(wrd);
2694 gen_op_iwmmxt_set_mup();
2695 break;
d00584b7 2696 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
18c9b560
AZ
2697 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2698 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2699 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2700 wrd = (insn >> 12) & 0xf;
2701 rd0 = (insn >> 16) & 0xf;
2702 rd1 = (insn >> 0) & 0xf;
2703 gen_op_iwmmxt_movq_M0_wRn(rd0);
2704 switch ((insn >> 20) & 0xf) {
2705 case 0x0:
2706 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2707 break;
2708 case 0x1:
2709 gen_op_iwmmxt_subub_M0_wRn(rd1);
2710 break;
2711 case 0x3:
2712 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2713 break;
2714 case 0x4:
2715 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2716 break;
2717 case 0x5:
2718 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2719 break;
2720 case 0x7:
2721 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2722 break;
2723 case 0x8:
2724 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2725 break;
2726 case 0x9:
2727 gen_op_iwmmxt_subul_M0_wRn(rd1);
2728 break;
2729 case 0xb:
2730 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2731 break;
2732 default:
2733 return 1;
2734 }
2735 gen_op_iwmmxt_movq_wRn_M0(wrd);
2736 gen_op_iwmmxt_set_mup();
2737 gen_op_iwmmxt_set_cup();
2738 break;
d00584b7 2739 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
18c9b560
AZ
2740 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2741 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2742 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2743 wrd = (insn >> 12) & 0xf;
2744 rd0 = (insn >> 16) & 0xf;
2745 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2746 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2747 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2748 tcg_temp_free_i32(tmp);
18c9b560
AZ
2749 gen_op_iwmmxt_movq_wRn_M0(wrd);
2750 gen_op_iwmmxt_set_mup();
2751 gen_op_iwmmxt_set_cup();
2752 break;
d00584b7 2753 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
18c9b560
AZ
2754 case 0x418: case 0x518: case 0x618: case 0x718:
2755 case 0x818: case 0x918: case 0xa18: case 0xb18:
2756 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2757 wrd = (insn >> 12) & 0xf;
2758 rd0 = (insn >> 16) & 0xf;
2759 rd1 = (insn >> 0) & 0xf;
2760 gen_op_iwmmxt_movq_M0_wRn(rd0);
2761 switch ((insn >> 20) & 0xf) {
2762 case 0x0:
2763 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2764 break;
2765 case 0x1:
2766 gen_op_iwmmxt_addub_M0_wRn(rd1);
2767 break;
2768 case 0x3:
2769 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2770 break;
2771 case 0x4:
2772 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2773 break;
2774 case 0x5:
2775 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2776 break;
2777 case 0x7:
2778 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2779 break;
2780 case 0x8:
2781 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2782 break;
2783 case 0x9:
2784 gen_op_iwmmxt_addul_M0_wRn(rd1);
2785 break;
2786 case 0xb:
2787 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2788 break;
2789 default:
2790 return 1;
2791 }
2792 gen_op_iwmmxt_movq_wRn_M0(wrd);
2793 gen_op_iwmmxt_set_mup();
2794 gen_op_iwmmxt_set_cup();
2795 break;
d00584b7 2796 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
18c9b560
AZ
2797 case 0x408: case 0x508: case 0x608: case 0x708:
2798 case 0x808: case 0x908: case 0xa08: case 0xb08:
2799 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2800 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2801 return 1;
18c9b560
AZ
2802 wrd = (insn >> 12) & 0xf;
2803 rd0 = (insn >> 16) & 0xf;
2804 rd1 = (insn >> 0) & 0xf;
2805 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2806 switch ((insn >> 22) & 3) {
18c9b560
AZ
2807 case 1:
2808 if (insn & (1 << 21))
2809 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2810 else
2811 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2812 break;
2813 case 2:
2814 if (insn & (1 << 21))
2815 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2816 else
2817 gen_op_iwmmxt_packul_M0_wRn(rd1);
2818 break;
2819 case 3:
2820 if (insn & (1 << 21))
2821 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2822 else
2823 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2824 break;
2825 }
2826 gen_op_iwmmxt_movq_wRn_M0(wrd);
2827 gen_op_iwmmxt_set_mup();
2828 gen_op_iwmmxt_set_cup();
2829 break;
2830 case 0x201: case 0x203: case 0x205: case 0x207:
2831 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2832 case 0x211: case 0x213: case 0x215: case 0x217:
2833 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2834 wrd = (insn >> 5) & 0xf;
2835 rd0 = (insn >> 12) & 0xf;
2836 rd1 = (insn >> 0) & 0xf;
2837 if (rd0 == 0xf || rd1 == 0xf)
2838 return 1;
2839 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2840 tmp = load_reg(s, rd0);
2841 tmp2 = load_reg(s, rd1);
18c9b560 2842 switch ((insn >> 16) & 0xf) {
d00584b7 2843 case 0x0: /* TMIA */
da6b5335 2844 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2845 break;
d00584b7 2846 case 0x8: /* TMIAPH */
da6b5335 2847 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2848 break;
d00584b7 2849 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2850 if (insn & (1 << 16))
da6b5335 2851 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2852 if (insn & (1 << 17))
da6b5335
FN
2853 tcg_gen_shri_i32(tmp2, tmp2, 16);
2854 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2855 break;
2856 default:
7d1b0095
PM
2857 tcg_temp_free_i32(tmp2);
2858 tcg_temp_free_i32(tmp);
18c9b560
AZ
2859 return 1;
2860 }
7d1b0095
PM
2861 tcg_temp_free_i32(tmp2);
2862 tcg_temp_free_i32(tmp);
18c9b560
AZ
2863 gen_op_iwmmxt_movq_wRn_M0(wrd);
2864 gen_op_iwmmxt_set_mup();
2865 break;
2866 default:
2867 return 1;
2868 }
2869
2870 return 0;
2871}
2872
a1c7273b 2873/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2874 (ie. an undefined instruction). */
7dcc1f89 2875static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2876{
2877 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2878 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2879
2880 if ((insn & 0x0ff00f10) == 0x0e200010) {
2881 /* Multiply with Internal Accumulate Format */
2882 rd0 = (insn >> 12) & 0xf;
2883 rd1 = insn & 0xf;
2884 acc = (insn >> 5) & 7;
2885
2886 if (acc != 0)
2887 return 1;
2888
3a554c0f
FN
2889 tmp = load_reg(s, rd0);
2890 tmp2 = load_reg(s, rd1);
18c9b560 2891 switch ((insn >> 16) & 0xf) {
d00584b7 2892 case 0x0: /* MIA */
3a554c0f 2893 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2894 break;
d00584b7 2895 case 0x8: /* MIAPH */
3a554c0f 2896 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2897 break;
d00584b7
PM
2898 case 0xc: /* MIABB */
2899 case 0xd: /* MIABT */
2900 case 0xe: /* MIATB */
2901 case 0xf: /* MIATT */
18c9b560 2902 if (insn & (1 << 16))
3a554c0f 2903 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2904 if (insn & (1 << 17))
3a554c0f
FN
2905 tcg_gen_shri_i32(tmp2, tmp2, 16);
2906 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2907 break;
2908 default:
2909 return 1;
2910 }
7d1b0095
PM
2911 tcg_temp_free_i32(tmp2);
2912 tcg_temp_free_i32(tmp);
18c9b560
AZ
2913
2914 gen_op_iwmmxt_movq_wRn_M0(acc);
2915 return 0;
2916 }
2917
2918 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2919 /* Internal Accumulator Access Format */
2920 rdhi = (insn >> 16) & 0xf;
2921 rdlo = (insn >> 12) & 0xf;
2922 acc = insn & 7;
2923
2924 if (acc != 0)
2925 return 1;
2926
d00584b7 2927 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2928 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2929 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2930 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2931 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2932 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
d00584b7 2933 } else { /* MAR */
3a554c0f
FN
2934 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2935 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2936 }
2937 return 0;
2938 }
2939
2940 return 1;
2941}
2942
9ee6e8bb
PB
/* Helpers for extracting VFP register numbers from an instruction word.
 *
 * A single-precision register number is split across a 4-bit "big" field
 * and a 1-bit "small" field in the insn; VFP_SREG glues them together
 * with the small bit as the LSB.  VFP_REG_SHR exists so the macro can
 * shift in either direction depending on the field position.
 *
 * Double-precision registers use the small bit as the high (5th) bit,
 * which is only valid with VFP3 (32 D registers); without VFP3 a set
 * small bit makes the encoding UNDEF.  Note VFP_DREG expands to a
 * statement that does "return 1" from the *enclosing* function in that
 * case, so it may only be used inside the disas_* functions.
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the D (destination), N and M (source) registers. */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2962
4373f3ce 2963/* Move between integer and VFP cores. */
39d5492a 2964static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2965{
39d5492a 2966 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2967 tcg_gen_mov_i32(tmp, cpu_F0s);
2968 return tmp;
2969}
2970
39d5492a 2971static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2972{
2973 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2974 tcg_temp_free_i32(tmp);
4373f3ce
PB
2975}
2976
39d5492a 2977static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2978{
39d5492a 2979 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2980 if (shift)
2981 tcg_gen_shri_i32(var, var, shift);
86831435 2982 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2983 tcg_gen_shli_i32(tmp, var, 8);
2984 tcg_gen_or_i32(var, var, tmp);
2985 tcg_gen_shli_i32(tmp, var, 16);
2986 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2987 tcg_temp_free_i32(tmp);
ad69471c
PB
2988}
2989
39d5492a 2990static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2991{
39d5492a 2992 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2993 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2994 tcg_gen_shli_i32(tmp, var, 16);
2995 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2996 tcg_temp_free_i32(tmp);
ad69471c
PB
2997}
2998
39d5492a 2999static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 3000{
39d5492a 3001 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
3002 tcg_gen_andi_i32(var, var, 0xffff0000);
3003 tcg_gen_shri_i32(tmp, var, 16);
3004 tcg_gen_or_i32(var, var, tmp);
7d1b0095 3005 tcg_temp_free_i32(tmp);
ad69471c
PB
3006}
3007
/* Load one Neon element from *addr and replicate it across all lanes of
 * a 32-bit TCG register (used by the VLD "single element to all lanes"
 * forms).  size: 0 = 8-bit, 1 = 16-bit, 2 = 32-bit element.
 * Returns a freshly allocated temp owned by the caller.
 */
static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        /* A 32-bit element already fills the register; no duplication. */
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        break;
    default: /* Avoid compiler warnings. */
        abort();
    }
    return tmp;
}
3029
/* Handle the v8 VSEL instruction: VSEL<cc>.F32/F64 Rd, Rn, Rm.
 * Selects Rn when the condition <cc> holds, else Rm, implemented
 * branchlessly with movcond on the cached CPSR flag values.
 * Only the four conditions eq/vs/ge/gt are encodable (bits [21:20]);
 * the inverted forms are expressed by swapping Rn/Rm at encode time.
 * Returns 0 (never UNDEFs at this point).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the 32-bit flag registers so the 64-bit movcond can
         * test them.  ZF is zero-extended (only ==0 is tested); NF and
         * VF are sign-extended because the flag lives in the sign bit.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two movconds: first select on !Z, then overwrite with frm
             * again if N != V, giving the conjunction of both tests.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Same two-movcond conjunction as the dp case above. */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
3138
/* Handle the v8 VMINNM/VMAXNM instructions (IEEE 754-2008 minNum /
 * maxNum: a quiet NaN operand loses to a numeric operand).  Bit 6 of
 * the insn selects min (1) vs max (0).  Returns 0.
 */
static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
                            uint32_t rm, uint32_t dp)
{
    uint32_t vmin = extract32(insn, 6, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return 0;
}
3186
/* Handle the v8 VRINTA/VRINTN/VRINTP/VRINTM instructions: round a
 * float to integral value in floating-point format, using an explicit
 * rounding mode rather than the FPSCR one.  Returns 0.
 */
static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    /* set_rmode writes the old mode back into tcg_rmode, so the second
     * call below restores the original FPSCR rounding mode.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    /* Restore the previous rounding mode. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}
3224
/* Handle the v8 VCVTA/VCVTN/VCVTP/VCVTM instructions: float to
 * signed/unsigned 32-bit integer conversion with an explicit rounding
 * mode (insn bit 7 selects signed).  The destination is always a
 * single-precision register, even for a double-precision source.
 * Returns 0.
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Plain (non-fixed-point) conversion: zero fractional bits. */
    tcg_shift = tcg_const_i32(0);

    /* set_rmode writes the old mode back into tcg_rmode, so the second
     * call below restores the original FPSCR rounding mode.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the previous rounding mode. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
3282
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Indexed by insn bits [17:16] of the VRINT/VCVT encodings.
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
3293
/* Decode the ARMv8-only VFP encodings (Thumb T=1 or ARM unconditional
 * space): VSEL, VMINNM/VMAXNM, VRINT{A,N,P,M} and VCVT{A,N,P,M}.
 * Returns 0 on success, nonzero for UNDEF.
 */
static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    /* Extract register numbers; the D/S form depends on the dp bit.
     * Note VFP_DREG_* can "return 1" (UNDEF) for non-VFP3 cores.
     */
    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    /* Dispatch on progressively tighter mask/value pairs. */
    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
3327
a1c7273b 3328/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3329 (ie. an undefined instruction). */
7dcc1f89 3330static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3331{
3332 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3333 int dp, veclen;
39d5492a
PM
3334 TCGv_i32 addr;
3335 TCGv_i32 tmp;
3336 TCGv_i32 tmp2;
b7bcbe95 3337
d614a513 3338 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3339 return 1;
d614a513 3340 }
40f137e1 3341
2c7ffc41
PM
3342 /* FIXME: this access check should not take precedence over UNDEF
3343 * for invalid encodings; we will generate incorrect syndrome information
3344 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3345 */
9dbbc748 3346 if (s->fp_excp_el) {
2c7ffc41 3347 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3348 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3349 return 0;
3350 }
3351
5df8bac1 3352 if (!s->vfp_enabled) {
9ee6e8bb 3353 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3354 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3355 return 1;
3356 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3357 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3358 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3359 return 1;
a50c0f51 3360 }
40f137e1 3361 }
6a57f3eb
WN
3362
3363 if (extract32(insn, 28, 4) == 0xf) {
3364 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3365 * only used in v8 and above.
3366 */
7dcc1f89 3367 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3368 }
3369
b7bcbe95
FB
3370 dp = ((insn & 0xf00) == 0xb00);
3371 switch ((insn >> 24) & 0xf) {
3372 case 0xe:
3373 if (insn & (1 << 4)) {
3374 /* single register transfer */
b7bcbe95
FB
3375 rd = (insn >> 12) & 0xf;
3376 if (dp) {
9ee6e8bb
PB
3377 int size;
3378 int pass;
3379
3380 VFP_DREG_N(rn, insn);
3381 if (insn & 0xf)
b7bcbe95 3382 return 1;
9ee6e8bb 3383 if (insn & 0x00c00060
d614a513 3384 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3385 return 1;
d614a513 3386 }
9ee6e8bb
PB
3387
3388 pass = (insn >> 21) & 1;
3389 if (insn & (1 << 22)) {
3390 size = 0;
3391 offset = ((insn >> 5) & 3) * 8;
3392 } else if (insn & (1 << 5)) {
3393 size = 1;
3394 offset = (insn & (1 << 6)) ? 16 : 0;
3395 } else {
3396 size = 2;
3397 offset = 0;
3398 }
18c9b560 3399 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3400 /* vfp->arm */
ad69471c 3401 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3402 switch (size) {
3403 case 0:
9ee6e8bb 3404 if (offset)
ad69471c 3405 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3406 if (insn & (1 << 23))
ad69471c 3407 gen_uxtb(tmp);
9ee6e8bb 3408 else
ad69471c 3409 gen_sxtb(tmp);
9ee6e8bb
PB
3410 break;
3411 case 1:
9ee6e8bb
PB
3412 if (insn & (1 << 23)) {
3413 if (offset) {
ad69471c 3414 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3415 } else {
ad69471c 3416 gen_uxth(tmp);
9ee6e8bb
PB
3417 }
3418 } else {
3419 if (offset) {
ad69471c 3420 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3421 } else {
ad69471c 3422 gen_sxth(tmp);
9ee6e8bb
PB
3423 }
3424 }
3425 break;
3426 case 2:
9ee6e8bb
PB
3427 break;
3428 }
ad69471c 3429 store_reg(s, rd, tmp);
b7bcbe95
FB
3430 } else {
3431 /* arm->vfp */
ad69471c 3432 tmp = load_reg(s, rd);
9ee6e8bb
PB
3433 if (insn & (1 << 23)) {
3434 /* VDUP */
3435 if (size == 0) {
ad69471c 3436 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3437 } else if (size == 1) {
ad69471c 3438 gen_neon_dup_low16(tmp);
9ee6e8bb 3439 }
cbbccffc 3440 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3441 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3442 tcg_gen_mov_i32(tmp2, tmp);
3443 neon_store_reg(rn, n, tmp2);
3444 }
3445 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3446 } else {
3447 /* VMOV */
3448 switch (size) {
3449 case 0:
ad69471c 3450 tmp2 = neon_load_reg(rn, pass);
d593c48e 3451 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3452 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3453 break;
3454 case 1:
ad69471c 3455 tmp2 = neon_load_reg(rn, pass);
d593c48e 3456 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3457 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3458 break;
3459 case 2:
9ee6e8bb
PB
3460 break;
3461 }
ad69471c 3462 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3463 }
b7bcbe95 3464 }
9ee6e8bb
PB
3465 } else { /* !dp */
3466 if ((insn & 0x6f) != 0x00)
3467 return 1;
3468 rn = VFP_SREG_N(insn);
18c9b560 3469 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3470 /* vfp->arm */
3471 if (insn & (1 << 21)) {
3472 /* system register */
40f137e1 3473 rn >>= 1;
9ee6e8bb 3474
b7bcbe95 3475 switch (rn) {
40f137e1 3476 case ARM_VFP_FPSID:
4373f3ce 3477 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3478 VFP3 restricts all id registers to privileged
3479 accesses. */
3480 if (IS_USER(s)
d614a513 3481 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3482 return 1;
d614a513 3483 }
4373f3ce 3484 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3485 break;
40f137e1 3486 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3487 if (IS_USER(s))
3488 return 1;
4373f3ce 3489 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3490 break;
40f137e1
PB
3491 case ARM_VFP_FPINST:
3492 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3493 /* Not present in VFP3. */
3494 if (IS_USER(s)
d614a513 3495 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3496 return 1;
d614a513 3497 }
4373f3ce 3498 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3499 break;
40f137e1 3500 case ARM_VFP_FPSCR:
601d70b9 3501 if (rd == 15) {
4373f3ce
PB
3502 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3503 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3504 } else {
7d1b0095 3505 tmp = tcg_temp_new_i32();
4373f3ce
PB
3506 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3507 }
b7bcbe95 3508 break;
a50c0f51 3509 case ARM_VFP_MVFR2:
d614a513 3510 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3511 return 1;
3512 }
3513 /* fall through */
9ee6e8bb
PB
3514 case ARM_VFP_MVFR0:
3515 case ARM_VFP_MVFR1:
3516 if (IS_USER(s)
d614a513 3517 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3518 return 1;
d614a513 3519 }
4373f3ce 3520 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3521 break;
b7bcbe95
FB
3522 default:
3523 return 1;
3524 }
3525 } else {
3526 gen_mov_F0_vreg(0, rn);
4373f3ce 3527 tmp = gen_vfp_mrs();
b7bcbe95
FB
3528 }
3529 if (rd == 15) {
b5ff1b31 3530 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3531 gen_set_nzcv(tmp);
7d1b0095 3532 tcg_temp_free_i32(tmp);
4373f3ce
PB
3533 } else {
3534 store_reg(s, rd, tmp);
3535 }
b7bcbe95
FB
3536 } else {
3537 /* arm->vfp */
b7bcbe95 3538 if (insn & (1 << 21)) {
40f137e1 3539 rn >>= 1;
b7bcbe95
FB
3540 /* system register */
3541 switch (rn) {
40f137e1 3542 case ARM_VFP_FPSID:
9ee6e8bb
PB
3543 case ARM_VFP_MVFR0:
3544 case ARM_VFP_MVFR1:
b7bcbe95
FB
3545 /* Writes are ignored. */
3546 break;
40f137e1 3547 case ARM_VFP_FPSCR:
e4c1cfa5 3548 tmp = load_reg(s, rd);
4373f3ce 3549 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3550 tcg_temp_free_i32(tmp);
b5ff1b31 3551 gen_lookup_tb(s);
b7bcbe95 3552 break;
40f137e1 3553 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3554 if (IS_USER(s))
3555 return 1;
71b3c3de
JR
3556 /* TODO: VFP subarchitecture support.
3557 * For now, keep the EN bit only */
e4c1cfa5 3558 tmp = load_reg(s, rd);
71b3c3de 3559 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3560 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3561 gen_lookup_tb(s);
3562 break;
3563 case ARM_VFP_FPINST:
3564 case ARM_VFP_FPINST2:
23adb861
PM
3565 if (IS_USER(s)) {
3566 return 1;
3567 }
e4c1cfa5 3568 tmp = load_reg(s, rd);
4373f3ce 3569 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3570 break;
b7bcbe95
FB
3571 default:
3572 return 1;
3573 }
3574 } else {
e4c1cfa5 3575 tmp = load_reg(s, rd);
4373f3ce 3576 gen_vfp_msr(tmp);
b7bcbe95
FB
3577 gen_mov_vreg_F0(0, rn);
3578 }
3579 }
3580 }
3581 } else {
3582 /* data processing */
3583 /* The opcode is in bits 23, 21, 20 and 6. */
3584 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3585 if (dp) {
3586 if (op == 15) {
3587 /* rn is opcode */
3588 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3589 } else {
3590 /* rn is register number */
9ee6e8bb 3591 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3592 }
3593
239c20c7
WN
3594 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3595 ((rn & 0x1e) == 0x6))) {
3596 /* Integer or single/half precision destination. */
9ee6e8bb 3597 rd = VFP_SREG_D(insn);
b7bcbe95 3598 } else {
9ee6e8bb 3599 VFP_DREG_D(rd, insn);
b7bcbe95 3600 }
04595bf6 3601 if (op == 15 &&
239c20c7
WN
3602 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3603 ((rn & 0x1e) == 0x4))) {
3604 /* VCVT from int or half precision is always from S reg
3605 * regardless of dp bit. VCVT with immediate frac_bits
3606 * has same format as SREG_M.
04595bf6
PM
3607 */
3608 rm = VFP_SREG_M(insn);
b7bcbe95 3609 } else {
9ee6e8bb 3610 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3611 }
3612 } else {
9ee6e8bb 3613 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3614 if (op == 15 && rn == 15) {
3615 /* Double precision destination. */
9ee6e8bb
PB
3616 VFP_DREG_D(rd, insn);
3617 } else {
3618 rd = VFP_SREG_D(insn);
3619 }
04595bf6
PM
3620 /* NB that we implicitly rely on the encoding for the frac_bits
3621 * in VCVT of fixed to float being the same as that of an SREG_M
3622 */
9ee6e8bb 3623 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3624 }
3625
69d1fc22 3626 veclen = s->vec_len;
b7bcbe95
FB
3627 if (op == 15 && rn > 3)
3628 veclen = 0;
3629
3630 /* Shut up compiler warnings. */
3631 delta_m = 0;
3632 delta_d = 0;
3633 bank_mask = 0;
3b46e624 3634
b7bcbe95
FB
3635 if (veclen > 0) {
3636 if (dp)
3637 bank_mask = 0xc;
3638 else
3639 bank_mask = 0x18;
3640
3641 /* Figure out what type of vector operation this is. */
3642 if ((rd & bank_mask) == 0) {
3643 /* scalar */
3644 veclen = 0;
3645 } else {
3646 if (dp)
69d1fc22 3647 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3648 else
69d1fc22 3649 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3650
3651 if ((rm & bank_mask) == 0) {
3652 /* mixed scalar/vector */
3653 delta_m = 0;
3654 } else {
3655 /* vector */
3656 delta_m = delta_d;
3657 }
3658 }
3659 }
3660
3661 /* Load the initial operands. */
3662 if (op == 15) {
3663 switch (rn) {
3664 case 16:
3665 case 17:
3666 /* Integer source */
3667 gen_mov_F0_vreg(0, rm);
3668 break;
3669 case 8:
3670 case 9:
3671 /* Compare */
3672 gen_mov_F0_vreg(dp, rd);
3673 gen_mov_F1_vreg(dp, rm);
3674 break;
3675 case 10:
3676 case 11:
3677 /* Compare with zero */
3678 gen_mov_F0_vreg(dp, rd);
3679 gen_vfp_F1_ld0(dp);
3680 break;
9ee6e8bb
PB
3681 case 20:
3682 case 21:
3683 case 22:
3684 case 23:
644ad806
PB
3685 case 28:
3686 case 29:
3687 case 30:
3688 case 31:
9ee6e8bb
PB
3689 /* Source and destination the same. */
3690 gen_mov_F0_vreg(dp, rd);
3691 break;
6e0c0ed1
PM
3692 case 4:
3693 case 5:
3694 case 6:
3695 case 7:
239c20c7
WN
3696 /* VCVTB, VCVTT: only present with the halfprec extension
3697 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3698 * (we choose to UNDEF)
6e0c0ed1 3699 */
d614a513
PM
3700 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3701 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3702 return 1;
3703 }
239c20c7
WN
3704 if (!extract32(rn, 1, 1)) {
3705 /* Half precision source. */
3706 gen_mov_F0_vreg(0, rm);
3707 break;
3708 }
6e0c0ed1 3709 /* Otherwise fall through */
b7bcbe95
FB
3710 default:
3711 /* One source operand. */
3712 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3713 break;
b7bcbe95
FB
3714 }
3715 } else {
3716 /* Two source operands. */
3717 gen_mov_F0_vreg(dp, rn);
3718 gen_mov_F1_vreg(dp, rm);
3719 }
3720
3721 for (;;) {
3722 /* Perform the calculation. */
3723 switch (op) {
605a6aed
PM
3724 case 0: /* VMLA: fd + (fn * fm) */
3725 /* Note that order of inputs to the add matters for NaNs */
3726 gen_vfp_F1_mul(dp);
3727 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3728 gen_vfp_add(dp);
3729 break;
605a6aed 3730 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3731 gen_vfp_mul(dp);
605a6aed
PM
3732 gen_vfp_F1_neg(dp);
3733 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3734 gen_vfp_add(dp);
3735 break;
605a6aed
PM
3736 case 2: /* VNMLS: -fd + (fn * fm) */
3737 /* Note that it isn't valid to replace (-A + B) with (B - A)
3738 * or similar plausible looking simplifications
3739 * because this will give wrong results for NaNs.
3740 */
3741 gen_vfp_F1_mul(dp);
3742 gen_mov_F0_vreg(dp, rd);
3743 gen_vfp_neg(dp);
3744 gen_vfp_add(dp);
b7bcbe95 3745 break;
605a6aed 3746 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3747 gen_vfp_mul(dp);
605a6aed
PM
3748 gen_vfp_F1_neg(dp);
3749 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3750 gen_vfp_neg(dp);
605a6aed 3751 gen_vfp_add(dp);
b7bcbe95
FB
3752 break;
3753 case 4: /* mul: fn * fm */
3754 gen_vfp_mul(dp);
3755 break;
3756 case 5: /* nmul: -(fn * fm) */
3757 gen_vfp_mul(dp);
3758 gen_vfp_neg(dp);
3759 break;
3760 case 6: /* add: fn + fm */
3761 gen_vfp_add(dp);
3762 break;
3763 case 7: /* sub: fn - fm */
3764 gen_vfp_sub(dp);
3765 break;
3766 case 8: /* div: fn / fm */
3767 gen_vfp_div(dp);
3768 break;
da97f52c
PM
3769 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3770 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3771 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3772 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3773 /* These are fused multiply-add, and must be done as one
3774 * floating point operation with no rounding between the
3775 * multiplication and addition steps.
3776 * NB that doing the negations here as separate steps is
3777 * correct : an input NaN should come out with its sign bit
3778 * flipped if it is a negated-input.
3779 */
d614a513 3780 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3781 return 1;
3782 }
3783 if (dp) {
3784 TCGv_ptr fpst;
3785 TCGv_i64 frd;
3786 if (op & 1) {
3787 /* VFNMS, VFMS */
3788 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3789 }
3790 frd = tcg_temp_new_i64();
3791 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3792 if (op & 2) {
3793 /* VFNMA, VFNMS */
3794 gen_helper_vfp_negd(frd, frd);
3795 }
3796 fpst = get_fpstatus_ptr(0);
3797 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3798 cpu_F1d, frd, fpst);
3799 tcg_temp_free_ptr(fpst);
3800 tcg_temp_free_i64(frd);
3801 } else {
3802 TCGv_ptr fpst;
3803 TCGv_i32 frd;
3804 if (op & 1) {
3805 /* VFNMS, VFMS */
3806 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3807 }
3808 frd = tcg_temp_new_i32();
3809 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3810 if (op & 2) {
3811 gen_helper_vfp_negs(frd, frd);
3812 }
3813 fpst = get_fpstatus_ptr(0);
3814 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3815 cpu_F1s, frd, fpst);
3816 tcg_temp_free_ptr(fpst);
3817 tcg_temp_free_i32(frd);
3818 }
3819 break;
9ee6e8bb 3820 case 14: /* fconst */
d614a513
PM
3821 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3822 return 1;
3823 }
9ee6e8bb
PB
3824
3825 n = (insn << 12) & 0x80000000;
3826 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3827 if (dp) {
3828 if (i & 0x40)
3829 i |= 0x3f80;
3830 else
3831 i |= 0x4000;
3832 n |= i << 16;
4373f3ce 3833 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3834 } else {
3835 if (i & 0x40)
3836 i |= 0x780;
3837 else
3838 i |= 0x800;
3839 n |= i << 19;
5b340b51 3840 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3841 }
9ee6e8bb 3842 break;
b7bcbe95
FB
3843 case 15: /* extension space */
3844 switch (rn) {
3845 case 0: /* cpy */
3846 /* no-op */
3847 break;
3848 case 1: /* abs */
3849 gen_vfp_abs(dp);
3850 break;
3851 case 2: /* neg */
3852 gen_vfp_neg(dp);
3853 break;
3854 case 3: /* sqrt */
3855 gen_vfp_sqrt(dp);
3856 break;
239c20c7 3857 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
486624fc
AB
3858 {
3859 TCGv_ptr fpst = get_fpstatus_ptr(false);
3860 TCGv_i32 ahp_mode = get_ahp_flag();
60011498
PB
3861 tmp = gen_vfp_mrs();
3862 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3863 if (dp) {
3864 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 3865 fpst, ahp_mode);
239c20c7
WN
3866 } else {
3867 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 3868 fpst, ahp_mode);
239c20c7 3869 }
486624fc
AB
3870 tcg_temp_free_i32(ahp_mode);
3871 tcg_temp_free_ptr(fpst);
7d1b0095 3872 tcg_temp_free_i32(tmp);
60011498 3873 break;
486624fc 3874 }
239c20c7 3875 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
486624fc
AB
3876 {
3877 TCGv_ptr fpst = get_fpstatus_ptr(false);
3878 TCGv_i32 ahp = get_ahp_flag();
60011498
PB
3879 tmp = gen_vfp_mrs();
3880 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3881 if (dp) {
3882 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 3883 fpst, ahp);
239c20c7
WN
3884 } else {
3885 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 3886 fpst, ahp);
239c20c7 3887 }
7d1b0095 3888 tcg_temp_free_i32(tmp);
486624fc
AB
3889 tcg_temp_free_i32(ahp);
3890 tcg_temp_free_ptr(fpst);
60011498 3891 break;
486624fc 3892 }
239c20c7 3893 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
486624fc
AB
3894 {
3895 TCGv_ptr fpst = get_fpstatus_ptr(false);
3896 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 3897 tmp = tcg_temp_new_i32();
486624fc 3898
239c20c7
WN
3899 if (dp) {
3900 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 3901 fpst, ahp);
239c20c7
WN
3902 } else {
3903 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 3904 fpst, ahp);
239c20c7 3905 }
486624fc
AB
3906 tcg_temp_free_i32(ahp);
3907 tcg_temp_free_ptr(fpst);
60011498
PB
3908 gen_mov_F0_vreg(0, rd);
3909 tmp2 = gen_vfp_mrs();
3910 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3911 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3912 tcg_temp_free_i32(tmp2);
60011498
PB
3913 gen_vfp_msr(tmp);
3914 break;
486624fc 3915 }
239c20c7 3916 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
486624fc
AB
3917 {
3918 TCGv_ptr fpst = get_fpstatus_ptr(false);
3919 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 3920 tmp = tcg_temp_new_i32();
239c20c7
WN
3921 if (dp) {
3922 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 3923 fpst, ahp);
239c20c7
WN
3924 } else {
3925 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 3926 fpst, ahp);
239c20c7 3927 }
486624fc
AB
3928 tcg_temp_free_i32(ahp);
3929 tcg_temp_free_ptr(fpst);
60011498
PB
3930 tcg_gen_shli_i32(tmp, tmp, 16);
3931 gen_mov_F0_vreg(0, rd);
3932 tmp2 = gen_vfp_mrs();
3933 tcg_gen_ext16u_i32(tmp2, tmp2);
3934 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3935 tcg_temp_free_i32(tmp2);
60011498
PB
3936 gen_vfp_msr(tmp);
3937 break;
486624fc 3938 }
b7bcbe95
FB
3939 case 8: /* cmp */
3940 gen_vfp_cmp(dp);
3941 break;
3942 case 9: /* cmpe */
3943 gen_vfp_cmpe(dp);
3944 break;
3945 case 10: /* cmpz */
3946 gen_vfp_cmp(dp);
3947 break;
3948 case 11: /* cmpez */
3949 gen_vfp_F1_ld0(dp);
3950 gen_vfp_cmpe(dp);
3951 break;
664c6733
WN
3952 case 12: /* vrintr */
3953 {
3954 TCGv_ptr fpst = get_fpstatus_ptr(0);
3955 if (dp) {
3956 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3957 } else {
3958 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3959 }
3960 tcg_temp_free_ptr(fpst);
3961 break;
3962 }
a290c62a
WN
3963 case 13: /* vrintz */
3964 {
3965 TCGv_ptr fpst = get_fpstatus_ptr(0);
3966 TCGv_i32 tcg_rmode;
3967 tcg_rmode = tcg_const_i32(float_round_to_zero);
9b049916 3968 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
3969 if (dp) {
3970 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3971 } else {
3972 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3973 }
9b049916 3974 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
3975 tcg_temp_free_i32(tcg_rmode);
3976 tcg_temp_free_ptr(fpst);
3977 break;
3978 }
4e82bc01
WN
3979 case 14: /* vrintx */
3980 {
3981 TCGv_ptr fpst = get_fpstatus_ptr(0);
3982 if (dp) {
3983 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3984 } else {
3985 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3986 }
3987 tcg_temp_free_ptr(fpst);
3988 break;
3989 }
b7bcbe95
FB
3990 case 15: /* single<->double conversion */
3991 if (dp)
4373f3ce 3992 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3993 else
4373f3ce 3994 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3995 break;
3996 case 16: /* fuito */
5500b06c 3997 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3998 break;
3999 case 17: /* fsito */
5500b06c 4000 gen_vfp_sito(dp, 0);
b7bcbe95 4001 break;
9ee6e8bb 4002 case 20: /* fshto */
d614a513
PM
4003 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4004 return 1;
4005 }
5500b06c 4006 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
4007 break;
4008 case 21: /* fslto */
d614a513
PM
4009 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4010 return 1;
4011 }
5500b06c 4012 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
4013 break;
4014 case 22: /* fuhto */
d614a513
PM
4015 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4016 return 1;
4017 }
5500b06c 4018 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
4019 break;
4020 case 23: /* fulto */
d614a513
PM
4021 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4022 return 1;
4023 }
5500b06c 4024 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 4025 break;
b7bcbe95 4026 case 24: /* ftoui */
5500b06c 4027 gen_vfp_toui(dp, 0);
b7bcbe95
FB
4028 break;
4029 case 25: /* ftouiz */
5500b06c 4030 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
4031 break;
4032 case 26: /* ftosi */
5500b06c 4033 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
4034 break;
4035 case 27: /* ftosiz */
5500b06c 4036 gen_vfp_tosiz(dp, 0);
b7bcbe95 4037 break;
9ee6e8bb 4038 case 28: /* ftosh */
d614a513
PM
4039 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4040 return 1;
4041 }
5500b06c 4042 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
4043 break;
4044 case 29: /* ftosl */
d614a513
PM
4045 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4046 return 1;
4047 }
5500b06c 4048 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
4049 break;
4050 case 30: /* ftouh */
d614a513
PM
4051 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4052 return 1;
4053 }
5500b06c 4054 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
4055 break;
4056 case 31: /* ftoul */
d614a513
PM
4057 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4058 return 1;
4059 }
5500b06c 4060 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 4061 break;
b7bcbe95 4062 default: /* undefined */
b7bcbe95
FB
4063 return 1;
4064 }
4065 break;
4066 default: /* undefined */
b7bcbe95
FB
4067 return 1;
4068 }
4069
4070 /* Write back the result. */
239c20c7
WN
4071 if (op == 15 && (rn >= 8 && rn <= 11)) {
4072 /* Comparison, do nothing. */
4073 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
4074 (rn & 0x1e) == 0x6)) {
4075 /* VCVT double to int: always integer result.
4076 * VCVT double to half precision is always a single
4077 * precision result.
4078 */
b7bcbe95 4079 gen_mov_vreg_F0(0, rd);
239c20c7 4080 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
4081 /* conversion */
4082 gen_mov_vreg_F0(!dp, rd);
239c20c7 4083 } else {
b7bcbe95 4084 gen_mov_vreg_F0(dp, rd);
239c20c7 4085 }
b7bcbe95
FB
4086
4087 /* break out of the loop if we have finished */
4088 if (veclen == 0)
4089 break;
4090
4091 if (op == 15 && delta_m == 0) {
4092 /* single source one-many */
4093 while (veclen--) {
4094 rd = ((rd + delta_d) & (bank_mask - 1))
4095 | (rd & bank_mask);
4096 gen_mov_vreg_F0(dp, rd);
4097 }
4098 break;
4099 }
4100 /* Setup the next operands. */
4101 veclen--;
4102 rd = ((rd + delta_d) & (bank_mask - 1))
4103 | (rd & bank_mask);
4104
4105 if (op == 15) {
4106 /* One source operand. */
4107 rm = ((rm + delta_m) & (bank_mask - 1))
4108 | (rm & bank_mask);
4109 gen_mov_F0_vreg(dp, rm);
4110 } else {
4111 /* Two source operands. */
4112 rn = ((rn + delta_d) & (bank_mask - 1))
4113 | (rn & bank_mask);
4114 gen_mov_F0_vreg(dp, rn);
4115 if (delta_m) {
4116 rm = ((rm + delta_m) & (bank_mask - 1))
4117 | (rm & bank_mask);
4118 gen_mov_F1_vreg(dp, rm);
4119 }
4120 }
4121 }
4122 }
4123 break;
4124 case 0xc:
4125 case 0xd:
8387da81 4126 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
4127 /* two-register transfer */
4128 rn = (insn >> 16) & 0xf;
4129 rd = (insn >> 12) & 0xf;
4130 if (dp) {
9ee6e8bb
PB
4131 VFP_DREG_M(rm, insn);
4132 } else {
4133 rm = VFP_SREG_M(insn);
4134 }
b7bcbe95 4135
18c9b560 4136 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4137 /* vfp->arm */
4138 if (dp) {
4373f3ce
PB
4139 gen_mov_F0_vreg(0, rm * 2);
4140 tmp = gen_vfp_mrs();
4141 store_reg(s, rd, tmp);
4142 gen_mov_F0_vreg(0, rm * 2 + 1);
4143 tmp = gen_vfp_mrs();
4144 store_reg(s, rn, tmp);
b7bcbe95
FB
4145 } else {
4146 gen_mov_F0_vreg(0, rm);
4373f3ce 4147 tmp = gen_vfp_mrs();
8387da81 4148 store_reg(s, rd, tmp);
b7bcbe95 4149 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4150 tmp = gen_vfp_mrs();
8387da81 4151 store_reg(s, rn, tmp);
b7bcbe95
FB
4152 }
4153 } else {
4154 /* arm->vfp */
4155 if (dp) {
4373f3ce
PB
4156 tmp = load_reg(s, rd);
4157 gen_vfp_msr(tmp);
4158 gen_mov_vreg_F0(0, rm * 2);
4159 tmp = load_reg(s, rn);
4160 gen_vfp_msr(tmp);
4161 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4162 } else {
8387da81 4163 tmp = load_reg(s, rd);
4373f3ce 4164 gen_vfp_msr(tmp);
b7bcbe95 4165 gen_mov_vreg_F0(0, rm);
8387da81 4166 tmp = load_reg(s, rn);
4373f3ce 4167 gen_vfp_msr(tmp);
b7bcbe95
FB
4168 gen_mov_vreg_F0(0, rm + 1);
4169 }
4170 }
4171 } else {
4172 /* Load/store */
4173 rn = (insn >> 16) & 0xf;
4174 if (dp)
9ee6e8bb 4175 VFP_DREG_D(rd, insn);
b7bcbe95 4176 else
9ee6e8bb 4177 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4178 if ((insn & 0x01200000) == 0x01000000) {
4179 /* Single load/store */
4180 offset = (insn & 0xff) << 2;
4181 if ((insn & (1 << 23)) == 0)
4182 offset = -offset;
934814f1
PM
4183 if (s->thumb && rn == 15) {
4184 /* This is actually UNPREDICTABLE */
4185 addr = tcg_temp_new_i32();
4186 tcg_gen_movi_i32(addr, s->pc & ~2);
4187 } else {
4188 addr = load_reg(s, rn);
4189 }
312eea9f 4190 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4191 if (insn & (1 << 20)) {
312eea9f 4192 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4193 gen_mov_vreg_F0(dp, rd);
4194 } else {
4195 gen_mov_F0_vreg(dp, rd);
312eea9f 4196 gen_vfp_st(s, dp, addr);
b7bcbe95 4197 }
7d1b0095 4198 tcg_temp_free_i32(addr);
b7bcbe95
FB
4199 } else {
4200 /* load/store multiple */
934814f1 4201 int w = insn & (1 << 21);
b7bcbe95
FB
4202 if (dp)
4203 n = (insn >> 1) & 0x7f;
4204 else
4205 n = insn & 0xff;
4206
934814f1
PM
4207 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4208 /* P == U , W == 1 => UNDEF */
4209 return 1;
4210 }
4211 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4212 /* UNPREDICTABLE cases for bad immediates: we choose to
4213 * UNDEF to avoid generating huge numbers of TCG ops
4214 */
4215 return 1;
4216 }
4217 if (rn == 15 && w) {
4218 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4219 return 1;
4220 }
4221
4222 if (s->thumb && rn == 15) {
4223 /* This is actually UNPREDICTABLE */
4224 addr = tcg_temp_new_i32();
4225 tcg_gen_movi_i32(addr, s->pc & ~2);
4226 } else {
4227 addr = load_reg(s, rn);
4228 }
b7bcbe95 4229 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4230 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95 4231
8a954faf
PM
4232 if (s->v8m_stackcheck && rn == 13 && w) {
4233 /*
4234 * Here 'addr' is the lowest address we will store to,
4235 * and is either the old SP (if post-increment) or
4236 * the new SP (if pre-decrement). For post-increment
4237 * where the old value is below the limit and the new
4238 * value is above, it is UNKNOWN whether the limit check
4239 * triggers; we choose to trigger.
4240 */
4241 gen_helper_v8m_stackcheck(cpu_env, addr);
4242 }
4243
b7bcbe95
FB
4244 if (dp)
4245 offset = 8;
4246 else
4247 offset = 4;
4248 for (i = 0; i < n; i++) {
18c9b560 4249 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4250 /* load */
312eea9f 4251 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4252 gen_mov_vreg_F0(dp, rd + i);
4253 } else {
4254 /* store */
4255 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4256 gen_vfp_st(s, dp, addr);
b7bcbe95 4257 }
312eea9f 4258 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4259 }
934814f1 4260 if (w) {
b7bcbe95
FB
4261 /* writeback */
4262 if (insn & (1 << 24))
4263 offset = -offset * n;
4264 else if (dp && (insn & 1))
4265 offset = 4;
4266 else
4267 offset = 0;
4268
4269 if (offset != 0)
312eea9f
FN
4270 tcg_gen_addi_i32(addr, addr, offset);
4271 store_reg(s, rn, addr);
4272 } else {
7d1b0095 4273 tcg_temp_free_i32(addr);
b7bcbe95
FB
4274 }
4275 }
4276 }
4277 break;
4278 default:
4279 /* Should never happen. */
4280 return 1;
4281 }
4282 return 0;
4283}
4284
90aa39a1 4285static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4286{
90aa39a1 4287#ifndef CONFIG_USER_ONLY
dcba3a8d 4288 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
90aa39a1
SF
4289 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4290#else
4291 return true;
4292#endif
4293}
6e256c93 4294
/* Emit an indirect jump through the TB-lookup helper. */
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}
4299
4cae8f56
AB
4300/* This will end the TB but doesn't guarantee we'll return to
4301 * cpu_loop_exec. Any live exit_requests will be processed as we
4302 * enter the next TB.
4303 */
8a6b28c7 4304static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
90aa39a1
SF
4305{
4306 if (use_goto_tb(s, dest)) {
57fec1fe 4307 tcg_gen_goto_tb(n);
eaed129d 4308 gen_set_pc_im(s, dest);
07ea28b4 4309 tcg_gen_exit_tb(s->base.tb, n);
6e256c93 4310 } else {
eaed129d 4311 gen_set_pc_im(s, dest);
8a6b28c7 4312 gen_goto_ptr();
6e256c93 4313 }
dcba3a8d 4314 s->base.is_jmp = DISAS_NORETURN;
c53be334
FB
4315}
4316
8aaca4c0
FB
4317static inline void gen_jmp (DisasContext *s, uint32_t dest)
4318{
b636649f 4319 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4320 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4321 if (s->thumb)
d9ba4830
PB
4322 dest |= 1;
4323 gen_bx_im(s, dest);
8aaca4c0 4324 } else {
6e256c93 4325 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4326 }
4327}
4328
39d5492a 4329static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4330{
ee097184 4331 if (x)
d9ba4830 4332 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4333 else
d9ba4830 4334 gen_sxth(t0);
ee097184 4335 if (y)
d9ba4830 4336 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4337 else
d9ba4830
PB
4338 gen_sxth(t1);
4339 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4340}
4341
4342/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4343static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4344{
b5ff1b31
FB
4345 uint32_t mask;
4346
4347 mask = 0;
4348 if (flags & (1 << 0))
4349 mask |= 0xff;
4350 if (flags & (1 << 1))
4351 mask |= 0xff00;
4352 if (flags & (1 << 2))
4353 mask |= 0xff0000;
4354 if (flags & (1 << 3))
4355 mask |= 0xff000000;
9ee6e8bb 4356
2ae23e75 4357 /* Mask out undefined bits. */
9ee6e8bb 4358 mask &= ~CPSR_RESERVED;
d614a513 4359 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4360 mask &= ~CPSR_T;
d614a513
PM
4361 }
4362 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4363 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4364 }
4365 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4366 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4367 }
4368 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4369 mask &= ~CPSR_IT;
d614a513 4370 }
4051e12c
PM
4371 /* Mask out execution state and reserved bits. */
4372 if (!spsr) {
4373 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4374 }
b5ff1b31
FB
4375 /* Mask out privileged bits. */
4376 if (IS_USER(s))
9ee6e8bb 4377 mask &= CPSR_USER;
b5ff1b31
FB
4378 return mask;
4379}
4380
2fbac54b 4381/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 4382static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 4383{
39d5492a 4384 TCGv_i32 tmp;
b5ff1b31
FB
4385 if (spsr) {
4386 /* ??? This is also undefined in system mode. */
4387 if (IS_USER(s))
4388 return 1;
d9ba4830
PB
4389
4390 tmp = load_cpu_field(spsr);
4391 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
4392 tcg_gen_andi_i32(t0, t0, mask);
4393 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 4394 store_cpu_field(tmp, spsr);
b5ff1b31 4395 } else {
2fbac54b 4396 gen_set_cpsr(t0, mask);
b5ff1b31 4397 }
7d1b0095 4398 tcg_temp_free_i32(t0);
b5ff1b31
FB
4399 gen_lookup_tb(s);
4400 return 0;
4401}
4402
2fbac54b
FN
4403/* Returns nonzero if access to the PSR is not permitted. */
4404static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4405{
39d5492a 4406 TCGv_i32 tmp;
7d1b0095 4407 tmp = tcg_temp_new_i32();
2fbac54b
FN
4408 tcg_gen_movi_i32(tmp, val);
4409 return gen_set_psr(s, mask, spsr, tmp);
4410}
4411
8bfd0550
PM
4412static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4413 int *tgtmode, int *regno)
4414{
4415 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4416 * the target mode and register number, and identify the various
4417 * unpredictable cases.
4418 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4419 * + executed in user mode
4420 * + using R15 as the src/dest register
4421 * + accessing an unimplemented register
4422 * + accessing a register that's inaccessible at current PL/security state*
4423 * + accessing a register that you could access with a different insn
4424 * We choose to UNDEF in all these cases.
4425 * Since we don't know which of the various AArch32 modes we are in
4426 * we have to defer some checks to runtime.
4427 * Accesses to Monitor mode registers from Secure EL1 (which implies
4428 * that EL3 is AArch64) must trap to EL3.
4429 *
4430 * If the access checks fail this function will emit code to take
4431 * an exception and return false. Otherwise it will return true,
4432 * and set *tgtmode and *regno appropriately.
4433 */
4434 int exc_target = default_exception_el(s);
4435
4436 /* These instructions are present only in ARMv8, or in ARMv7 with the
4437 * Virtualization Extensions.
4438 */
4439 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4440 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4441 goto undef;
4442 }
4443
4444 if (IS_USER(s) || rn == 15) {
4445 goto undef;
4446 }
4447
4448 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4449 * of registers into (r, sysm).
4450 */
4451 if (r) {
4452 /* SPSRs for other modes */
4453 switch (sysm) {
4454 case 0xe: /* SPSR_fiq */
4455 *tgtmode = ARM_CPU_MODE_FIQ;
4456 break;
4457 case 0x10: /* SPSR_irq */
4458 *tgtmode = ARM_CPU_MODE_IRQ;
4459 break;
4460 case 0x12: /* SPSR_svc */
4461 *tgtmode = ARM_CPU_MODE_SVC;
4462 break;
4463 case 0x14: /* SPSR_abt */
4464 *tgtmode = ARM_CPU_MODE_ABT;
4465 break;
4466 case 0x16: /* SPSR_und */
4467 *tgtmode = ARM_CPU_MODE_UND;
4468 break;
4469 case 0x1c: /* SPSR_mon */
4470 *tgtmode = ARM_CPU_MODE_MON;
4471 break;
4472 case 0x1e: /* SPSR_hyp */
4473 *tgtmode = ARM_CPU_MODE_HYP;
4474 break;
4475 default: /* unallocated */
4476 goto undef;
4477 }
4478 /* We arbitrarily assign SPSR a register number of 16. */
4479 *regno = 16;
4480 } else {
4481 /* general purpose registers for other modes */
4482 switch (sysm) {
4483 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4484 *tgtmode = ARM_CPU_MODE_USR;
4485 *regno = sysm + 8;
4486 break;
4487 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4488 *tgtmode = ARM_CPU_MODE_FIQ;
4489 *regno = sysm;
4490 break;
4491 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4492 *tgtmode = ARM_CPU_MODE_IRQ;
4493 *regno = sysm & 1 ? 13 : 14;
4494 break;
4495 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4496 *tgtmode = ARM_CPU_MODE_SVC;
4497 *regno = sysm & 1 ? 13 : 14;
4498 break;
4499 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4500 *tgtmode = ARM_CPU_MODE_ABT;
4501 *regno = sysm & 1 ? 13 : 14;
4502 break;
4503 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4504 *tgtmode = ARM_CPU_MODE_UND;
4505 *regno = sysm & 1 ? 13 : 14;
4506 break;
4507 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4508 *tgtmode = ARM_CPU_MODE_MON;
4509 *regno = sysm & 1 ? 13 : 14;
4510 break;
4511 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4512 *tgtmode = ARM_CPU_MODE_HYP;
4513 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4514 *regno = sysm & 1 ? 13 : 17;
4515 break;
4516 default: /* unallocated */
4517 goto undef;
4518 }
4519 }
4520
4521 /* Catch the 'accessing inaccessible register' cases we can detect
4522 * at translate time.
4523 */
4524 switch (*tgtmode) {
4525 case ARM_CPU_MODE_MON:
4526 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4527 goto undef;
4528 }
4529 if (s->current_el == 1) {
4530 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4531 * then accesses to Mon registers trap to EL3
4532 */
4533 exc_target = 3;
4534 goto undef;
4535 }
4536 break;
4537 case ARM_CPU_MODE_HYP:
aec4dd09
PM
4538 /*
4539 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
4540 * (and so we can forbid accesses from EL2 or below). elr_hyp
4541 * can be accessed also from Hyp mode, so forbid accesses from
4542 * EL0 or EL1.
8bfd0550 4543 */
aec4dd09
PM
4544 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
4545 (s->current_el < 3 && *regno != 17)) {
8bfd0550
PM
4546 goto undef;
4547 }
4548 break;
4549 default:
4550 break;
4551 }
4552
4553 return true;
4554
4555undef:
4556 /* If we get here then some access check did not pass */
4557 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4558 return false;
4559}
4560
4561static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4562{
4563 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4564 int tgtmode = 0, regno = 0;
4565
4566 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4567 return;
4568 }
4569
4570 /* Sync state because msr_banked() can raise exceptions */
4571 gen_set_condexec(s);
4572 gen_set_pc_im(s, s->pc - 4);
4573 tcg_reg = load_reg(s, rn);
4574 tcg_tgtmode = tcg_const_i32(tgtmode);
4575 tcg_regno = tcg_const_i32(regno);
4576 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4577 tcg_temp_free_i32(tcg_tgtmode);
4578 tcg_temp_free_i32(tcg_regno);
4579 tcg_temp_free_i32(tcg_reg);
dcba3a8d 4580 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4581}
4582
4583static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4584{
4585 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4586 int tgtmode = 0, regno = 0;
4587
4588 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4589 return;
4590 }
4591
4592 /* Sync state because mrs_banked() can raise exceptions */
4593 gen_set_condexec(s);
4594 gen_set_pc_im(s, s->pc - 4);
4595 tcg_reg = tcg_temp_new_i32();
4596 tcg_tgtmode = tcg_const_i32(tgtmode);
4597 tcg_regno = tcg_const_i32(regno);
4598 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4599 tcg_temp_free_i32(tcg_tgtmode);
4600 tcg_temp_free_i32(tcg_regno);
4601 store_reg(s, rn, tcg_reg);
dcba3a8d 4602 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4603}
4604
fb0e8e79
PM
4605/* Store value to PC as for an exception return (ie don't
4606 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4607 * will do the masking based on the new value of the Thumb bit.
4608 */
4609static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
b5ff1b31 4610{
fb0e8e79
PM
4611 tcg_gen_mov_i32(cpu_R[15], pc);
4612 tcg_temp_free_i32(pc);
b5ff1b31
FB
4613}
4614
b0109805 4615/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 4616static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 4617{
fb0e8e79
PM
4618 store_pc_exc_ret(s, pc);
4619 /* The cpsr_write_eret helper will mask the low bits of PC
4620 * appropriately depending on the new Thumb bit, so it must
4621 * be called after storing the new PC.
4622 */
e69ad9df
AL
4623 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4624 gen_io_start();
4625 }
235ea1f5 4626 gen_helper_cpsr_write_eret(cpu_env, cpsr);
e69ad9df
AL
4627 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4628 gen_io_end();
4629 }
7d1b0095 4630 tcg_temp_free_i32(cpsr);
b29fd33d 4631 /* Must exit loop to check un-masked IRQs */
dcba3a8d 4632 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb 4633}
3b46e624 4634
fb0e8e79
PM
4635/* Generate an old-style exception return. Marks pc as dead. */
4636static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4637{
4638 gen_rfe(s, pc, load_cpu_field(spsr));
4639}
4640
c22edfeb
AB
4641/*
4642 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4643 * only call the helper when running single threaded TCG code to ensure
4644 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4645 * just skip this instruction. Currently the SEV/SEVL instructions
4646 * which are *one* of many ways to wake the CPU from WFE are not
4647 * implemented so we can't sleep like WFI does.
4648 */
9ee6e8bb
PB
4649static void gen_nop_hint(DisasContext *s, int val)
4650{
4651 switch (val) {
2399d4e7
EC
4652 /* When running in MTTCG we don't generate jumps to the yield and
4653 * WFE helpers as it won't affect the scheduling of other vCPUs.
4654 * If we wanted to more completely model WFE/SEV so we don't busy
4655 * spin unnecessarily we would need to do something more involved.
4656 */
c87e5a61 4657 case 1: /* yield */
2399d4e7 4658 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4659 gen_set_pc_im(s, s->pc);
dcba3a8d 4660 s->base.is_jmp = DISAS_YIELD;
c22edfeb 4661 }
c87e5a61 4662 break;
9ee6e8bb 4663 case 3: /* wfi */
eaed129d 4664 gen_set_pc_im(s, s->pc);
dcba3a8d 4665 s->base.is_jmp = DISAS_WFI;
9ee6e8bb
PB
4666 break;
4667 case 2: /* wfe */
2399d4e7 4668 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4669 gen_set_pc_im(s, s->pc);
dcba3a8d 4670 s->base.is_jmp = DISAS_WFE;
c22edfeb 4671 }
72c1d3af 4672 break;
9ee6e8bb 4673 case 4: /* sev */
12b10571
MR
4674 case 5: /* sevl */
4675 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4676 default: /* nop */
4677 break;
4678 }
4679}
99c475ab 4680
ad69471c 4681#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4682
39d5492a 4683static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4684{
4685 switch (size) {
dd8fbd78
FN
4686 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4687 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4688 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4689 default: abort();
9ee6e8bb 4690 }
9ee6e8bb
PB
4691}
4692
39d5492a 4693static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4694{
4695 switch (size) {
dd8fbd78
FN
4696 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4697 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4698 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4699 default: return;
4700 }
4701}
4702
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/*
 * Dispatch to the s8/u8/s16/u16/s32/u32 variant of a NEON helper that
 * takes cpu_env, selected by the (size, u) pair in the enclosing scope.
 * Expands a `return 1` (UNDEF) in the enclosing function for invalid
 * combinations, so it may only be used where that is the right action.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, for helpers that do not take cpu_env. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4754
39d5492a 4755static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4756{
39d5492a 4757 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4758 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4759 return tmp;
9ee6e8bb
PB
4760}
4761
39d5492a 4762static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4763{
dd8fbd78 4764 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4765 tcg_temp_free_i32(var);
9ee6e8bb
PB
4766}
4767
39d5492a 4768static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4769{
39d5492a 4770 TCGv_i32 tmp;
9ee6e8bb 4771 if (size == 1) {
0fad6efc
PM
4772 tmp = neon_load_reg(reg & 7, reg >> 4);
4773 if (reg & 8) {
dd8fbd78 4774 gen_neon_dup_high16(tmp);
0fad6efc
PM
4775 } else {
4776 gen_neon_dup_low16(tmp);
dd8fbd78 4777 }
0fad6efc
PM
4778 } else {
4779 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4780 }
dd8fbd78 4781 return tmp;
9ee6e8bb
PB
4782}
4783
02acedf9 4784static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4785{
b13708bb
RH
4786 TCGv_ptr pd, pm;
4787
600b828c 4788 if (!q && size == 2) {
02acedf9
PM
4789 return 1;
4790 }
b13708bb
RH
4791 pd = vfp_reg_ptr(true, rd);
4792 pm = vfp_reg_ptr(true, rm);
02acedf9
PM
4793 if (q) {
4794 switch (size) {
4795 case 0:
b13708bb 4796 gen_helper_neon_qunzip8(pd, pm);
02acedf9
PM
4797 break;
4798 case 1:
b13708bb 4799 gen_helper_neon_qunzip16(pd, pm);
02acedf9
PM
4800 break;
4801 case 2:
b13708bb 4802 gen_helper_neon_qunzip32(pd, pm);
02acedf9
PM
4803 break;
4804 default:
4805 abort();
4806 }
4807 } else {
4808 switch (size) {
4809 case 0:
b13708bb 4810 gen_helper_neon_unzip8(pd, pm);
02acedf9
PM
4811 break;
4812 case 1:
b13708bb 4813 gen_helper_neon_unzip16(pd, pm);
02acedf9
PM
4814 break;
4815 default:
4816 abort();
4817 }
4818 }
b13708bb
RH
4819 tcg_temp_free_ptr(pd);
4820 tcg_temp_free_ptr(pm);
02acedf9 4821 return 0;
19457615
FN
4822}
4823
d68a6f3a 4824static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4825{
b13708bb
RH
4826 TCGv_ptr pd, pm;
4827
600b828c 4828 if (!q && size == 2) {
d68a6f3a
PM
4829 return 1;
4830 }
b13708bb
RH
4831 pd = vfp_reg_ptr(true, rd);
4832 pm = vfp_reg_ptr(true, rm);
d68a6f3a
PM
4833 if (q) {
4834 switch (size) {
4835 case 0:
b13708bb 4836 gen_helper_neon_qzip8(pd, pm);
d68a6f3a
PM
4837 break;
4838 case 1:
b13708bb 4839 gen_helper_neon_qzip16(pd, pm);
d68a6f3a
PM
4840 break;
4841 case 2:
b13708bb 4842 gen_helper_neon_qzip32(pd, pm);
d68a6f3a
PM
4843 break;
4844 default:
4845 abort();
4846 }
4847 } else {
4848 switch (size) {
4849 case 0:
b13708bb 4850 gen_helper_neon_zip8(pd, pm);
d68a6f3a
PM
4851 break;
4852 case 1:
b13708bb 4853 gen_helper_neon_zip16(pd, pm);
d68a6f3a
PM
4854 break;
4855 default:
4856 abort();
4857 }
4858 }
b13708bb
RH
4859 tcg_temp_free_ptr(pd);
4860 tcg_temp_free_ptr(pm);
d68a6f3a 4861 return 0;
19457615
FN
4862}
4863
39d5492a 4864static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4865{
39d5492a 4866 TCGv_i32 rd, tmp;
19457615 4867
7d1b0095
PM
4868 rd = tcg_temp_new_i32();
4869 tmp = tcg_temp_new_i32();
19457615
FN
4870
4871 tcg_gen_shli_i32(rd, t0, 8);
4872 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4873 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4874 tcg_gen_or_i32(rd, rd, tmp);
4875
4876 tcg_gen_shri_i32(t1, t1, 8);
4877 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4878 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4879 tcg_gen_or_i32(t1, t1, tmp);
4880 tcg_gen_mov_i32(t0, rd);
4881
7d1b0095
PM
4882 tcg_temp_free_i32(tmp);
4883 tcg_temp_free_i32(rd);
19457615
FN
4884}
4885
39d5492a 4886static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4887{
39d5492a 4888 TCGv_i32 rd, tmp;
19457615 4889
7d1b0095
PM
4890 rd = tcg_temp_new_i32();
4891 tmp = tcg_temp_new_i32();
19457615
FN
4892
4893 tcg_gen_shli_i32(rd, t0, 16);
4894 tcg_gen_andi_i32(tmp, t1, 0xffff);
4895 tcg_gen_or_i32(rd, rd, tmp);
4896 tcg_gen_shri_i32(t1, t1, 16);
4897 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4898 tcg_gen_or_i32(t1, t1, tmp);
4899 tcg_gen_mov_i32(t0, rd);
4900
7d1b0095
PM
4901 tcg_temp_free_i32(tmp);
4902 tcg_temp_free_i32(rd);
19457615
FN
4903}
4904
4905
/*
 * Layout table for the NEON "load/store multiple structures" encodings:
 * indexed by the itype field (0..10), giving the number of registers
 * transferred, the interleave factor, and the register spacing.
 * This is a pure lookup table that is never written, so declare it
 * const to keep it in read-only storage and prevent accidental writes.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4923
4924/* Translate a NEON load/store element instruction. Return nonzero if the
4925 instruction is invalid. */
7dcc1f89 4926static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4927{
4928 int rd, rn, rm;
4929 int op;
4930 int nregs;
4931 int interleave;
84496233 4932 int spacing;
9ee6e8bb
PB
4933 int stride;
4934 int size;
4935 int reg;
4936 int pass;
4937 int load;
4938 int shift;
9ee6e8bb 4939 int n;
39d5492a
PM
4940 TCGv_i32 addr;
4941 TCGv_i32 tmp;
4942 TCGv_i32 tmp2;
84496233 4943 TCGv_i64 tmp64;
9ee6e8bb 4944
2c7ffc41
PM
4945 /* FIXME: this access check should not take precedence over UNDEF
4946 * for invalid encodings; we will generate incorrect syndrome information
4947 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4948 */
9dbbc748 4949 if (s->fp_excp_el) {
2c7ffc41 4950 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 4951 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
4952 return 0;
4953 }
4954
5df8bac1 4955 if (!s->vfp_enabled)
9ee6e8bb
PB
4956 return 1;
4957 VFP_DREG_D(rd, insn);
4958 rn = (insn >> 16) & 0xf;
4959 rm = insn & 0xf;
4960 load = (insn & (1 << 21)) != 0;
4961 if ((insn & (1 << 23)) == 0) {
4962 /* Load store all elements. */
4963 op = (insn >> 8) & 0xf;
4964 size = (insn >> 6) & 3;
84496233 4965 if (op > 10)
9ee6e8bb 4966 return 1;
f2dd89d0
PM
4967 /* Catch UNDEF cases for bad values of align field */
4968 switch (op & 0xc) {
4969 case 4:
4970 if (((insn >> 5) & 1) == 1) {
4971 return 1;
4972 }
4973 break;
4974 case 8:
4975 if (((insn >> 4) & 3) == 3) {
4976 return 1;
4977 }
4978 break;
4979 default:
4980 break;
4981 }
9ee6e8bb
PB
4982 nregs = neon_ls_element_type[op].nregs;
4983 interleave = neon_ls_element_type[op].interleave;
84496233
JR
4984 spacing = neon_ls_element_type[op].spacing;
4985 if (size == 3 && (interleave | spacing) != 1)
4986 return 1;
e318a60b 4987 addr = tcg_temp_new_i32();
dcc65026 4988 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4989 stride = (1 << size) * interleave;
4990 for (reg = 0; reg < nregs; reg++) {
4991 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
4992 load_reg_var(s, addr, rn);
4993 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 4994 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
4995 load_reg_var(s, addr, rn);
4996 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 4997 }
84496233 4998 if (size == 3) {
8ed1237d 4999 tmp64 = tcg_temp_new_i64();
84496233 5000 if (load) {
12dcc321 5001 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
84496233 5002 neon_store_reg64(tmp64, rd);
84496233 5003 } else {
84496233 5004 neon_load_reg64(tmp64, rd);
12dcc321 5005 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
84496233 5006 }
8ed1237d 5007 tcg_temp_free_i64(tmp64);
84496233
JR
5008 tcg_gen_addi_i32(addr, addr, stride);
5009 } else {
5010 for (pass = 0; pass < 2; pass++) {
5011 if (size == 2) {
5012 if (load) {
58ab8e96 5013 tmp = tcg_temp_new_i32();
12dcc321 5014 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
84496233
JR
5015 neon_store_reg(rd, pass, tmp);
5016 } else {
5017 tmp = neon_load_reg(rd, pass);
12dcc321 5018 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
58ab8e96 5019 tcg_temp_free_i32(tmp);
84496233 5020 }
1b2b1e54 5021 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
5022 } else if (size == 1) {
5023 if (load) {
58ab8e96 5024 tmp = tcg_temp_new_i32();
12dcc321 5025 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
84496233 5026 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96 5027 tmp2 = tcg_temp_new_i32();
12dcc321 5028 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
84496233 5029 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
5030 tcg_gen_shli_i32(tmp2, tmp2, 16);
5031 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5032 tcg_temp_free_i32(tmp2);
84496233
JR
5033 neon_store_reg(rd, pass, tmp);
5034 } else {
5035 tmp = neon_load_reg(rd, pass);
7d1b0095 5036 tmp2 = tcg_temp_new_i32();
84496233 5037 tcg_gen_shri_i32(tmp2, tmp, 16);
12dcc321 5038 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
58ab8e96 5039 tcg_temp_free_i32(tmp);
84496233 5040 tcg_gen_addi_i32(addr, addr, stride);
12dcc321 5041 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
58ab8e96 5042 tcg_temp_free_i32(tmp2);
1b2b1e54 5043 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 5044 }
84496233
JR
5045 } else /* size == 0 */ {
5046 if (load) {
f764718d 5047 tmp2 = NULL;
84496233 5048 for (n = 0; n < 4; n++) {
58ab8e96 5049 tmp = tcg_temp_new_i32();
12dcc321 5050 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
84496233
JR
5051 tcg_gen_addi_i32(addr, addr, stride);
5052 if (n == 0) {
5053 tmp2 = tmp;
5054 } else {
41ba8341
PB
5055 tcg_gen_shli_i32(tmp, tmp, n * 8);
5056 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 5057 tcg_temp_free_i32(tmp);
84496233 5058 }
9ee6e8bb 5059 }
84496233
JR
5060 neon_store_reg(rd, pass, tmp2);
5061 } else {
5062 tmp2 = neon_load_reg(rd, pass);
5063 for (n = 0; n < 4; n++) {
7d1b0095 5064 tmp = tcg_temp_new_i32();
84496233
JR
5065 if (n == 0) {
5066 tcg_gen_mov_i32(tmp, tmp2);
5067 } else {
5068 tcg_gen_shri_i32(tmp, tmp2, n * 8);
5069 }
12dcc321 5070 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
58ab8e96 5071 tcg_temp_free_i32(tmp);
84496233
JR
5072 tcg_gen_addi_i32(addr, addr, stride);
5073 }
7d1b0095 5074 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5075 }
5076 }
5077 }
5078 }
84496233 5079 rd += spacing;
9ee6e8bb 5080 }
e318a60b 5081 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5082 stride = nregs * 8;
5083 } else {
5084 size = (insn >> 10) & 3;
5085 if (size == 3) {
5086 /* Load single element to all lanes. */
8e18cde3
PM
5087 int a = (insn >> 4) & 1;
5088 if (!load) {
9ee6e8bb 5089 return 1;
8e18cde3 5090 }
9ee6e8bb
PB
5091 size = (insn >> 6) & 3;
5092 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
5093
5094 if (size == 3) {
5095 if (nregs != 4 || a == 0) {
9ee6e8bb 5096 return 1;
99c475ab 5097 }
8e18cde3
PM
5098 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5099 size = 2;
5100 }
5101 if (nregs == 1 && a == 1 && size == 0) {
5102 return 1;
5103 }
5104 if (nregs == 3 && a == 1) {
5105 return 1;
5106 }
e318a60b 5107 addr = tcg_temp_new_i32();
8e18cde3
PM
5108 load_reg_var(s, addr, rn);
5109 if (nregs == 1) {
5110 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
5111 tmp = gen_load_and_replicate(s, addr, size);
5112 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5113 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5114 if (insn & (1 << 5)) {
5115 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
5116 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
5117 }
5118 tcg_temp_free_i32(tmp);
5119 } else {
5120 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
5121 stride = (insn & (1 << 5)) ? 2 : 1;
5122 for (reg = 0; reg < nregs; reg++) {
5123 tmp = gen_load_and_replicate(s, addr, size);
5124 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5125 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5126 tcg_temp_free_i32(tmp);
5127 tcg_gen_addi_i32(addr, addr, 1 << size);
5128 rd += stride;
5129 }
9ee6e8bb 5130 }
e318a60b 5131 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5132 stride = (1 << size) * nregs;
5133 } else {
5134 /* Single element. */
93262b16 5135 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
5136 pass = (insn >> 7) & 1;
5137 switch (size) {
5138 case 0:
5139 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
5140 stride = 1;
5141 break;
5142 case 1:
5143 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
5144 stride = (insn & (1 << 5)) ? 2 : 1;
5145 break;
5146 case 2:
5147 shift = 0;
9ee6e8bb
PB
5148 stride = (insn & (1 << 6)) ? 2 : 1;
5149 break;
5150 default:
5151 abort();
5152 }
5153 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
5154 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5155 switch (nregs) {
5156 case 1:
5157 if (((idx & (1 << size)) != 0) ||
5158 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5159 return 1;
5160 }
5161 break;
5162 case 3:
5163 if ((idx & 1) != 0) {
5164 return 1;
5165 }
5166 /* fall through */
5167 case 2:
5168 if (size == 2 && (idx & 2) != 0) {
5169 return 1;
5170 }
5171 break;
5172 case 4:
5173 if ((size == 2) && ((idx & 3) == 3)) {
5174 return 1;
5175 }
5176 break;
5177 default:
5178 abort();
5179 }
5180 if ((rd + stride * (nregs - 1)) > 31) {
5181 /* Attempts to write off the end of the register file
5182 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5183 * the neon_load_reg() would write off the end of the array.
5184 */
5185 return 1;
5186 }
e318a60b 5187 addr = tcg_temp_new_i32();
dcc65026 5188 load_reg_var(s, addr, rn);
9ee6e8bb
PB
5189 for (reg = 0; reg < nregs; reg++) {
5190 if (load) {
58ab8e96 5191 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
5192 switch (size) {
5193 case 0:
12dcc321 5194 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5195 break;
5196 case 1:
12dcc321 5197 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5198 break;
5199 case 2:
12dcc321 5200 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5201 break;
a50f5b91
PB
5202 default: /* Avoid compiler warnings. */
5203 abort();
9ee6e8bb
PB
5204 }
5205 if (size != 2) {
8f8e3aa4 5206 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
5207 tcg_gen_deposit_i32(tmp, tmp2, tmp,
5208 shift, size ? 16 : 8);
7d1b0095 5209 tcg_temp_free_i32(tmp2);
9ee6e8bb 5210 }
8f8e3aa4 5211 neon_store_reg(rd, pass, tmp);
9ee6e8bb 5212 } else { /* Store */
8f8e3aa4
PB
5213 tmp = neon_load_reg(rd, pass);
5214 if (shift)
5215 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
5216 switch (size) {
5217 case 0:
12dcc321 5218 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5219 break;
5220 case 1:
12dcc321 5221 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5222 break;
5223 case 2:
12dcc321 5224 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5225 break;
99c475ab 5226 }
58ab8e96 5227 tcg_temp_free_i32(tmp);
99c475ab 5228 }
9ee6e8bb 5229 rd += stride;
1b2b1e54 5230 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 5231 }
e318a60b 5232 tcg_temp_free_i32(addr);
9ee6e8bb 5233 stride = nregs * (1 << size);
99c475ab 5234 }
9ee6e8bb
PB
5235 }
5236 if (rm != 15) {
39d5492a 5237 TCGv_i32 base;
b26eefb6
PB
5238
5239 base = load_reg(s, rn);
9ee6e8bb 5240 if (rm == 13) {
b26eefb6 5241 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 5242 } else {
39d5492a 5243 TCGv_i32 index;
b26eefb6
PB
5244 index = load_reg(s, rm);
5245 tcg_gen_add_i32(base, base, index);
7d1b0095 5246 tcg_temp_free_i32(index);
9ee6e8bb 5247 }
b26eefb6 5248 store_reg(s, rn, base);
9ee6e8bb
PB
5249 }
5250 return 0;
5251}
3b46e624 5252
8f8e3aa4 5253/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 5254static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
5255{
5256 tcg_gen_and_i32(t, t, c);
f669df27 5257 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
5258 tcg_gen_or_i32(dest, t, f);
5259}
5260
39d5492a 5261static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5262{
5263 switch (size) {
5264 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5265 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5266 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5267 default: abort();
5268 }
5269}
5270
39d5492a 5271static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5272{
5273 switch (size) {
02da0b2d
PM
5274 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5275 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5276 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5277 default: abort();
5278 }
5279}
5280
39d5492a 5281static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5282{
5283 switch (size) {
02da0b2d
PM
5284 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5285 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5286 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5287 default: abort();
5288 }
5289}
5290
39d5492a 5291static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5292{
5293 switch (size) {
02da0b2d
PM
5294 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5295 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5296 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5297 default: abort();
5298 }
5299}
5300
39d5492a 5301static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5302 int q, int u)
5303{
5304 if (q) {
5305 if (u) {
5306 switch (size) {
5307 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5308 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5309 default: abort();
5310 }
5311 } else {
5312 switch (size) {
5313 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5314 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5315 default: abort();
5316 }
5317 }
5318 } else {
5319 if (u) {
5320 switch (size) {
b408a9b0
CL
5321 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5322 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5323 default: abort();
5324 }
5325 } else {
5326 switch (size) {
5327 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5328 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5329 default: abort();
5330 }
5331 }
5332 }
5333}
5334
39d5492a 5335static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5336{
5337 if (u) {
5338 switch (size) {
5339 case 0: gen_helper_neon_widen_u8(dest, src); break;
5340 case 1: gen_helper_neon_widen_u16(dest, src); break;
5341 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5342 default: abort();
5343 }
5344 } else {
5345 switch (size) {
5346 case 0: gen_helper_neon_widen_s8(dest, src); break;
5347 case 1: gen_helper_neon_widen_s16(dest, src); break;
5348 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5349 default: abort();
5350 }
5351 }
7d1b0095 5352 tcg_temp_free_i32(src);
ad69471c
PB
5353}
5354
5355static inline void gen_neon_addl(int size)
5356{
5357 switch (size) {
5358 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5359 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5360 case 2: tcg_gen_add_i64(CPU_V001); break;
5361 default: abort();
5362 }
5363}
5364
5365static inline void gen_neon_subl(int size)
5366{
5367 switch (size) {
5368 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5369 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5370 case 2: tcg_gen_sub_i64(CPU_V001); break;
5371 default: abort();
5372 }
5373}
5374
a7812ae4 5375static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5376{
5377 switch (size) {
5378 case 0: gen_helper_neon_negl_u16(var, var); break;
5379 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5380 case 2:
5381 tcg_gen_neg_i64(var, var);
5382 break;
ad69471c
PB
5383 default: abort();
5384 }
5385}
5386
a7812ae4 5387static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5388{
5389 switch (size) {
02da0b2d
PM
5390 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5391 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5392 default: abort();
5393 }
5394}
5395
39d5492a
PM
5396static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5397 int size, int u)
ad69471c 5398{
a7812ae4 5399 TCGv_i64 tmp;
ad69471c
PB
5400
5401 switch ((size << 1) | u) {
5402 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5403 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5404 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5405 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5406 case 4:
5407 tmp = gen_muls_i64_i32(a, b);
5408 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5409 tcg_temp_free_i64(tmp);
ad69471c
PB
5410 break;
5411 case 5:
5412 tmp = gen_mulu_i64_i32(a, b);
5413 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5414 tcg_temp_free_i64(tmp);
ad69471c
PB
5415 break;
5416 default: abort();
5417 }
c6067f04
CL
5418
5419 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5420 Don't forget to clean them now. */
5421 if (size < 2) {
7d1b0095
PM
5422 tcg_temp_free_i32(a);
5423 tcg_temp_free_i32(b);
c6067f04 5424 }
ad69471c
PB
5425}
5426
39d5492a
PM
5427static void gen_neon_narrow_op(int op, int u, int size,
5428 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5429{
5430 if (op) {
5431 if (u) {
5432 gen_neon_unarrow_sats(size, dest, src);
5433 } else {
5434 gen_neon_narrow(size, dest, src);
5435 }
5436 } else {
5437 if (u) {
5438 gen_neon_narrow_satu(size, dest, src);
5439 } else {
5440 gen_neon_narrow_sats(size, dest, src);
5441 }
5442 }
5443}
5444
62698be3
PM
5445/* Symbolic constants for op fields for Neon 3-register same-length.
5446 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
5447 * table A7-9.
5448 */
5449#define NEON_3R_VHADD 0
5450#define NEON_3R_VQADD 1
5451#define NEON_3R_VRHADD 2
5452#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
5453#define NEON_3R_VHSUB 4
5454#define NEON_3R_VQSUB 5
5455#define NEON_3R_VCGT 6
5456#define NEON_3R_VCGE 7
5457#define NEON_3R_VSHL 8
5458#define NEON_3R_VQSHL 9
5459#define NEON_3R_VRSHL 10
5460#define NEON_3R_VQRSHL 11
5461#define NEON_3R_VMAX 12
5462#define NEON_3R_VMIN 13
5463#define NEON_3R_VABD 14
5464#define NEON_3R_VABA 15
5465#define NEON_3R_VADD_VSUB 16
5466#define NEON_3R_VTST_VCEQ 17
5467#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
5468#define NEON_3R_VMUL 19
5469#define NEON_3R_VPMAX 20
5470#define NEON_3R_VPMIN 21
5471#define NEON_3R_VQDMULH_VQRDMULH 22
36a71934 5472#define NEON_3R_VPADD_VQRDMLAH 23
f1ecb913 5473#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
36a71934 5474#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
62698be3
PM
5475#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
5476#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
5477#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
5478#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
5479#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
505935fc 5480#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
62698be3
PM
5481
5482static const uint8_t neon_3r_sizes[] = {
5483 [NEON_3R_VHADD] = 0x7,
5484 [NEON_3R_VQADD] = 0xf,
5485 [NEON_3R_VRHADD] = 0x7,
5486 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
5487 [NEON_3R_VHSUB] = 0x7,
5488 [NEON_3R_VQSUB] = 0xf,
5489 [NEON_3R_VCGT] = 0x7,
5490 [NEON_3R_VCGE] = 0x7,
5491 [NEON_3R_VSHL] = 0xf,
5492 [NEON_3R_VQSHL] = 0xf,
5493 [NEON_3R_VRSHL] = 0xf,
5494 [NEON_3R_VQRSHL] = 0xf,
5495 [NEON_3R_VMAX] = 0x7,
5496 [NEON_3R_VMIN] = 0x7,
5497 [NEON_3R_VABD] = 0x7,
5498 [NEON_3R_VABA] = 0x7,
5499 [NEON_3R_VADD_VSUB] = 0xf,
5500 [NEON_3R_VTST_VCEQ] = 0x7,
5501 [NEON_3R_VML] = 0x7,
5502 [NEON_3R_VMUL] = 0x7,
5503 [NEON_3R_VPMAX] = 0x7,
5504 [NEON_3R_VPMIN] = 0x7,
5505 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
36a71934 5506 [NEON_3R_VPADD_VQRDMLAH] = 0x7,
f1ecb913 5507 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
36a71934 5508 [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
62698be3
PM
5509 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
5510 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
5511 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
5512 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
5513 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
505935fc 5514 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
62698be3
PM
5515};
5516
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5584static int neon_2rm_is_float_op(int op)
5585{
5586 /* Return true if this neon 2reg-misc op is float-to-float */
5587 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5588 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5589 op == NEON_2RM_VRINTM ||
5590 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5591 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5592}
5593
fe8fcf3d
PM
5594static bool neon_2rm_is_v8_op(int op)
5595{
5596 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5597 switch (op) {
5598 case NEON_2RM_VRINTN:
5599 case NEON_2RM_VRINTA:
5600 case NEON_2RM_VRINTM:
5601 case NEON_2RM_VRINTP:
5602 case NEON_2RM_VRINTZ:
5603 case NEON_2RM_VRINTX:
5604 case NEON_2RM_VCVTAU:
5605 case NEON_2RM_VCVTAS:
5606 case NEON_2RM_VCVTNU:
5607 case NEON_2RM_VCVTNS:
5608 case NEON_2RM_VCVTPU:
5609 case NEON_2RM_VCVTPS:
5610 case NEON_2RM_VCVTMU:
5611 case NEON_2RM_VCVTMS:
5612 return true;
5613 default:
5614 return false;
5615 }
5616}
5617
600b828c
PM
5618/* Each entry in this array has bit n set if the insn allows
5619 * size value n (otherwise it will UNDEF). Since unallocated
5620 * op values will have no bits set they always UNDEF.
5621 */
5622static const uint8_t neon_2rm_sizes[] = {
5623 [NEON_2RM_VREV64] = 0x7,
5624 [NEON_2RM_VREV32] = 0x3,
5625 [NEON_2RM_VREV16] = 0x1,
5626 [NEON_2RM_VPADDL] = 0x7,
5627 [NEON_2RM_VPADDL_U] = 0x7,
9d935509
AB
5628 [NEON_2RM_AESE] = 0x1,
5629 [NEON_2RM_AESMC] = 0x1,
600b828c
PM
5630 [NEON_2RM_VCLS] = 0x7,
5631 [NEON_2RM_VCLZ] = 0x7,
5632 [NEON_2RM_VCNT] = 0x1,
5633 [NEON_2RM_VMVN] = 0x1,
5634 [NEON_2RM_VPADAL] = 0x7,
5635 [NEON_2RM_VPADAL_U] = 0x7,
5636 [NEON_2RM_VQABS] = 0x7,
5637 [NEON_2RM_VQNEG] = 0x7,
5638 [NEON_2RM_VCGT0] = 0x7,
5639 [NEON_2RM_VCGE0] = 0x7,
5640 [NEON_2RM_VCEQ0] = 0x7,
5641 [NEON_2RM_VCLE0] = 0x7,
5642 [NEON_2RM_VCLT0] = 0x7,
f1ecb913 5643 [NEON_2RM_SHA1H] = 0x4,
600b828c
PM
5644 [NEON_2RM_VABS] = 0x7,
5645 [NEON_2RM_VNEG] = 0x7,
5646 [NEON_2RM_VCGT0_F] = 0x4,
5647 [NEON_2RM_VCGE0_F] = 0x4,
5648 [NEON_2RM_VCEQ0_F] = 0x4,
5649 [NEON_2RM_VCLE0_F] = 0x4,
5650 [NEON_2RM_VCLT0_F] = 0x4,
5651 [NEON_2RM_VABS_F] = 0x4,
5652 [NEON_2RM_VNEG_F] = 0x4,
5653 [NEON_2RM_VSWP] = 0x1,
5654 [NEON_2RM_VTRN] = 0x7,
5655 [NEON_2RM_VUZP] = 0x7,
5656 [NEON_2RM_VZIP] = 0x7,
5657 [NEON_2RM_VMOVN] = 0x7,
5658 [NEON_2RM_VQMOVN] = 0x7,
5659 [NEON_2RM_VSHLL] = 0x7,
f1ecb913 5660 [NEON_2RM_SHA1SU1] = 0x4,
34f7b0a2 5661 [NEON_2RM_VRINTN] = 0x4,
2ce70625 5662 [NEON_2RM_VRINTX] = 0x4,
34f7b0a2
WN
5663 [NEON_2RM_VRINTA] = 0x4,
5664 [NEON_2RM_VRINTZ] = 0x4,
600b828c 5665 [NEON_2RM_VCVT_F16_F32] = 0x2,
34f7b0a2 5666 [NEON_2RM_VRINTM] = 0x4,
600b828c 5667 [NEON_2RM_VCVT_F32_F16] = 0x2,
34f7b0a2 5668 [NEON_2RM_VRINTP] = 0x4,
901ad525
WN
5669 [NEON_2RM_VCVTAU] = 0x4,
5670 [NEON_2RM_VCVTAS] = 0x4,
5671 [NEON_2RM_VCVTNU] = 0x4,
5672 [NEON_2RM_VCVTNS] = 0x4,
5673 [NEON_2RM_VCVTPU] = 0x4,
5674 [NEON_2RM_VCVTPS] = 0x4,
5675 [NEON_2RM_VCVTMU] = 0x4,
5676 [NEON_2RM_VCVTMS] = 0x4,
600b828c
PM
5677 [NEON_2RM_VRECPE] = 0x4,
5678 [NEON_2RM_VRSQRTE] = 0x4,
5679 [NEON_2RM_VRECPE_F] = 0x4,
5680 [NEON_2RM_VRSQRTE_F] = 0x4,
5681 [NEON_2RM_VCVT_FS] = 0x4,
5682 [NEON_2RM_VCVT_FU] = 0x4,
5683 [NEON_2RM_VCVT_SF] = 0x4,
5684 [NEON_2RM_VCVT_UF] = 0x4,
5685};
5686
36a71934
RH
5687
5688/* Expand v8.1 simd helper. */
5689static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
5690 int q, int rd, int rn, int rm)
5691{
962fcbf2 5692 if (dc_isar_feature(aa32_rdm, s)) {
36a71934
RH
5693 int opr_sz = (1 + q) * 8;
5694 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
5695 vfp_reg_offset(1, rn),
5696 vfp_reg_offset(1, rm), cpu_env,
5697 opr_sz, opr_sz, 0, fn);
5698 return 0;
5699 }
5700 return 1;
5701}
5702
9ee6e8bb
PB
5703/* Translate a NEON data processing instruction. Return nonzero if the
5704 instruction is invalid.
ad69471c
PB
5705 We process data in a mixture of 32-bit and 64-bit chunks.
5706 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5707
7dcc1f89 5708static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5709{
5710 int op;
5711 int q;
5712 int rd, rn, rm;
5713 int size;
5714 int shift;
5715 int pass;
5716 int count;
5717 int pairwise;
5718 int u;
ca9a32e4 5719 uint32_t imm, mask;
39d5492a 5720 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
1a66ac61 5721 TCGv_ptr ptr1, ptr2, ptr3;
a7812ae4 5722 TCGv_i64 tmp64;
9ee6e8bb 5723
2c7ffc41
PM
5724 /* FIXME: this access check should not take precedence over UNDEF
5725 * for invalid encodings; we will generate incorrect syndrome information
5726 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5727 */
9dbbc748 5728 if (s->fp_excp_el) {
2c7ffc41 5729 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5730 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5731 return 0;
5732 }
5733
5df8bac1 5734 if (!s->vfp_enabled)
9ee6e8bb
PB
5735 return 1;
5736 q = (insn & (1 << 6)) != 0;
5737 u = (insn >> 24) & 1;
5738 VFP_DREG_D(rd, insn);
5739 VFP_DREG_N(rn, insn);
5740 VFP_DREG_M(rm, insn);
5741 size = (insn >> 20) & 3;
5742 if ((insn & (1 << 23)) == 0) {
5743 /* Three register same length. */
5744 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5745 /* Catch invalid op and bad size combinations: UNDEF */
5746 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5747 return 1;
5748 }
25f84f79
PM
5749 /* All insns of this form UNDEF for either this condition or the
5750 * superset of cases "Q==1"; we catch the latter later.
5751 */
5752 if (q && ((rd | rn | rm) & 1)) {
5753 return 1;
5754 }
36a71934
RH
5755 switch (op) {
5756 case NEON_3R_SHA:
5757 /* The SHA-1/SHA-256 3-register instructions require special
5758 * treatment here, as their size field is overloaded as an
5759 * op type selector, and they all consume their input in a
5760 * single pass.
5761 */
f1ecb913
AB
5762 if (!q) {
5763 return 1;
5764 }
5765 if (!u) { /* SHA-1 */
962fcbf2 5766 if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
5767 return 1;
5768 }
1a66ac61
RH
5769 ptr1 = vfp_reg_ptr(true, rd);
5770 ptr2 = vfp_reg_ptr(true, rn);
5771 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913 5772 tmp4 = tcg_const_i32(size);
1a66ac61 5773 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
f1ecb913
AB
5774 tcg_temp_free_i32(tmp4);
5775 } else { /* SHA-256 */
962fcbf2 5776 if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
f1ecb913
AB
5777 return 1;
5778 }
1a66ac61
RH
5779 ptr1 = vfp_reg_ptr(true, rd);
5780 ptr2 = vfp_reg_ptr(true, rn);
5781 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913
AB
5782 switch (size) {
5783 case 0:
1a66ac61 5784 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
f1ecb913
AB
5785 break;
5786 case 1:
1a66ac61 5787 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
f1ecb913
AB
5788 break;
5789 case 2:
1a66ac61 5790 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
f1ecb913
AB
5791 break;
5792 }
5793 }
1a66ac61
RH
5794 tcg_temp_free_ptr(ptr1);
5795 tcg_temp_free_ptr(ptr2);
5796 tcg_temp_free_ptr(ptr3);
f1ecb913 5797 return 0;
36a71934
RH
5798
5799 case NEON_3R_VPADD_VQRDMLAH:
5800 if (!u) {
5801 break; /* VPADD */
5802 }
5803 /* VQRDMLAH */
5804 switch (size) {
5805 case 1:
5806 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
5807 q, rd, rn, rm);
5808 case 2:
5809 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
5810 q, rd, rn, rm);
5811 }
5812 return 1;
5813
5814 case NEON_3R_VFM_VQRDMLSH:
5815 if (!u) {
5816 /* VFM, VFMS */
5817 if (size == 1) {
5818 return 1;
5819 }
5820 break;
5821 }
5822 /* VQRDMLSH */
5823 switch (size) {
5824 case 1:
5825 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
5826 q, rd, rn, rm);
5827 case 2:
5828 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
5829 q, rd, rn, rm);
5830 }
5831 return 1;
f1ecb913 5832 }
62698be3
PM
5833 if (size == 3 && op != NEON_3R_LOGIC) {
5834 /* 64-bit element instructions. */
9ee6e8bb 5835 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5836 neon_load_reg64(cpu_V0, rn + pass);
5837 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5838 switch (op) {
62698be3 5839 case NEON_3R_VQADD:
9ee6e8bb 5840 if (u) {
02da0b2d
PM
5841 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5842 cpu_V0, cpu_V1);
2c0262af 5843 } else {
02da0b2d
PM
5844 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5845 cpu_V0, cpu_V1);
2c0262af 5846 }
9ee6e8bb 5847 break;
62698be3 5848 case NEON_3R_VQSUB:
9ee6e8bb 5849 if (u) {
02da0b2d
PM
5850 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5851 cpu_V0, cpu_V1);
ad69471c 5852 } else {
02da0b2d
PM
5853 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5854 cpu_V0, cpu_V1);
ad69471c
PB
5855 }
5856 break;
62698be3 5857 case NEON_3R_VSHL:
ad69471c
PB
5858 if (u) {
5859 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5860 } else {
5861 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5862 }
5863 break;
62698be3 5864 case NEON_3R_VQSHL:
ad69471c 5865 if (u) {
02da0b2d
PM
5866 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5867 cpu_V1, cpu_V0);
ad69471c 5868 } else {
02da0b2d
PM
5869 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5870 cpu_V1, cpu_V0);
ad69471c
PB
5871 }
5872 break;
62698be3 5873 case NEON_3R_VRSHL:
ad69471c
PB
5874 if (u) {
5875 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5876 } else {
ad69471c
PB
5877 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5878 }
5879 break;
62698be3 5880 case NEON_3R_VQRSHL:
ad69471c 5881 if (u) {
02da0b2d
PM
5882 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5883 cpu_V1, cpu_V0);
ad69471c 5884 } else {
02da0b2d
PM
5885 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5886 cpu_V1, cpu_V0);
1e8d4eec 5887 }
9ee6e8bb 5888 break;
62698be3 5889 case NEON_3R_VADD_VSUB:
9ee6e8bb 5890 if (u) {
ad69471c 5891 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5892 } else {
ad69471c 5893 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5894 }
5895 break;
5896 default:
5897 abort();
2c0262af 5898 }
ad69471c 5899 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5900 }
9ee6e8bb 5901 return 0;
2c0262af 5902 }
25f84f79 5903 pairwise = 0;
9ee6e8bb 5904 switch (op) {
62698be3
PM
5905 case NEON_3R_VSHL:
5906 case NEON_3R_VQSHL:
5907 case NEON_3R_VRSHL:
5908 case NEON_3R_VQRSHL:
9ee6e8bb 5909 {
ad69471c
PB
5910 int rtmp;
5911 /* Shift instruction operands are reversed. */
5912 rtmp = rn;
9ee6e8bb 5913 rn = rm;
ad69471c 5914 rm = rtmp;
9ee6e8bb 5915 }
2c0262af 5916 break;
36a71934 5917 case NEON_3R_VPADD_VQRDMLAH:
62698be3
PM
5918 case NEON_3R_VPMAX:
5919 case NEON_3R_VPMIN:
9ee6e8bb 5920 pairwise = 1;
2c0262af 5921 break;
25f84f79
PM
5922 case NEON_3R_FLOAT_ARITH:
5923 pairwise = (u && size < 2); /* if VPADD (float) */
5924 break;
5925 case NEON_3R_FLOAT_MINMAX:
5926 pairwise = u; /* if VPMIN/VPMAX (float) */
5927 break;
5928 case NEON_3R_FLOAT_CMP:
5929 if (!u && size) {
5930 /* no encoding for U=0 C=1x */
5931 return 1;
5932 }
5933 break;
5934 case NEON_3R_FLOAT_ACMP:
5935 if (!u) {
5936 return 1;
5937 }
5938 break;
505935fc
WN
5939 case NEON_3R_FLOAT_MISC:
5940 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5941 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5942 return 1;
5943 }
2c0262af 5944 break;
25f84f79
PM
5945 case NEON_3R_VMUL:
5946 if (u && (size != 0)) {
5947 /* UNDEF on invalid size for polynomial subcase */
5948 return 1;
5949 }
2c0262af 5950 break;
36a71934
RH
5951 case NEON_3R_VFM_VQRDMLSH:
5952 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
5953 return 1;
5954 }
5955 break;
9ee6e8bb 5956 default:
2c0262af 5957 break;
9ee6e8bb 5958 }
dd8fbd78 5959
25f84f79
PM
5960 if (pairwise && q) {
5961 /* All the pairwise insns UNDEF if Q is set */
5962 return 1;
5963 }
5964
9ee6e8bb
PB
5965 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5966
5967 if (pairwise) {
5968 /* Pairwise. */
a5a14945
JR
5969 if (pass < 1) {
5970 tmp = neon_load_reg(rn, 0);
5971 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5972 } else {
a5a14945
JR
5973 tmp = neon_load_reg(rm, 0);
5974 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5975 }
5976 } else {
5977 /* Elementwise. */
dd8fbd78
FN
5978 tmp = neon_load_reg(rn, pass);
5979 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5980 }
5981 switch (op) {
62698be3 5982 case NEON_3R_VHADD:
9ee6e8bb
PB
5983 GEN_NEON_INTEGER_OP(hadd);
5984 break;
62698be3 5985 case NEON_3R_VQADD:
02da0b2d 5986 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5987 break;
62698be3 5988 case NEON_3R_VRHADD:
9ee6e8bb 5989 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5990 break;
62698be3 5991 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5992 switch ((u << 2) | size) {
5993 case 0: /* VAND */
dd8fbd78 5994 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5995 break;
5996 case 1: /* BIC */
f669df27 5997 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5998 break;
5999 case 2: /* VORR */
dd8fbd78 6000 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
6001 break;
6002 case 3: /* VORN */
f669df27 6003 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
6004 break;
6005 case 4: /* VEOR */
dd8fbd78 6006 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
6007 break;
6008 case 5: /* VBSL */
dd8fbd78
FN
6009 tmp3 = neon_load_reg(rd, pass);
6010 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 6011 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
6012 break;
6013 case 6: /* VBIT */
dd8fbd78
FN
6014 tmp3 = neon_load_reg(rd, pass);
6015 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 6016 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
6017 break;
6018 case 7: /* VBIF */
dd8fbd78
FN
6019 tmp3 = neon_load_reg(rd, pass);
6020 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 6021 tcg_temp_free_i32(tmp3);
9ee6e8bb 6022 break;
2c0262af
FB
6023 }
6024 break;
62698be3 6025 case NEON_3R_VHSUB:
9ee6e8bb
PB
6026 GEN_NEON_INTEGER_OP(hsub);
6027 break;
62698be3 6028 case NEON_3R_VQSUB:
02da0b2d 6029 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 6030 break;
62698be3 6031 case NEON_3R_VCGT:
9ee6e8bb
PB
6032 GEN_NEON_INTEGER_OP(cgt);
6033 break;
62698be3 6034 case NEON_3R_VCGE:
9ee6e8bb
PB
6035 GEN_NEON_INTEGER_OP(cge);
6036 break;
62698be3 6037 case NEON_3R_VSHL:
ad69471c 6038 GEN_NEON_INTEGER_OP(shl);
2c0262af 6039 break;
62698be3 6040 case NEON_3R_VQSHL:
02da0b2d 6041 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 6042 break;
62698be3 6043 case NEON_3R_VRSHL:
ad69471c 6044 GEN_NEON_INTEGER_OP(rshl);
2c0262af 6045 break;
62698be3 6046 case NEON_3R_VQRSHL:
02da0b2d 6047 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 6048 break;
62698be3 6049 case NEON_3R_VMAX:
9ee6e8bb
PB
6050 GEN_NEON_INTEGER_OP(max);
6051 break;
62698be3 6052 case NEON_3R_VMIN:
9ee6e8bb
PB
6053 GEN_NEON_INTEGER_OP(min);
6054 break;
62698be3 6055 case NEON_3R_VABD:
9ee6e8bb
PB
6056 GEN_NEON_INTEGER_OP(abd);
6057 break;
62698be3 6058 case NEON_3R_VABA:
9ee6e8bb 6059 GEN_NEON_INTEGER_OP(abd);
7d1b0095 6060 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
6061 tmp2 = neon_load_reg(rd, pass);
6062 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 6063 break;
62698be3 6064 case NEON_3R_VADD_VSUB:
9ee6e8bb 6065 if (!u) { /* VADD */
62698be3 6066 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6067 } else { /* VSUB */
6068 switch (size) {
dd8fbd78
FN
6069 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
6070 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
6071 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 6072 default: abort();
9ee6e8bb
PB
6073 }
6074 }
6075 break;
62698be3 6076 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
6077 if (!u) { /* VTST */
6078 switch (size) {
dd8fbd78
FN
6079 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
6080 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
6081 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 6082 default: abort();
9ee6e8bb
PB
6083 }
6084 } else { /* VCEQ */
6085 switch (size) {
dd8fbd78
FN
6086 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6087 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6088 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 6089 default: abort();
9ee6e8bb
PB
6090 }
6091 }
6092 break;
62698be3 6093 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 6094 switch (size) {
dd8fbd78
FN
6095 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6096 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6097 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 6098 default: abort();
9ee6e8bb 6099 }
7d1b0095 6100 tcg_temp_free_i32(tmp2);
dd8fbd78 6101 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6102 if (u) { /* VMLS */
dd8fbd78 6103 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 6104 } else { /* VMLA */
dd8fbd78 6105 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6106 }
6107 break;
62698be3 6108 case NEON_3R_VMUL:
9ee6e8bb 6109 if (u) { /* polynomial */
dd8fbd78 6110 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
6111 } else { /* Integer */
6112 switch (size) {
dd8fbd78
FN
6113 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6114 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6115 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 6116 default: abort();
9ee6e8bb
PB
6117 }
6118 }
6119 break;
62698be3 6120 case NEON_3R_VPMAX:
9ee6e8bb
PB
6121 GEN_NEON_INTEGER_OP(pmax);
6122 break;
62698be3 6123 case NEON_3R_VPMIN:
9ee6e8bb
PB
6124 GEN_NEON_INTEGER_OP(pmin);
6125 break;
62698be3 6126 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
6127 if (!u) { /* VQDMULH */
6128 switch (size) {
02da0b2d
PM
6129 case 1:
6130 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6131 break;
6132 case 2:
6133 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6134 break;
62698be3 6135 default: abort();
9ee6e8bb 6136 }
62698be3 6137 } else { /* VQRDMULH */
9ee6e8bb 6138 switch (size) {
02da0b2d
PM
6139 case 1:
6140 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6141 break;
6142 case 2:
6143 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6144 break;
62698be3 6145 default: abort();
9ee6e8bb
PB
6146 }
6147 }
6148 break;
36a71934 6149 case NEON_3R_VPADD_VQRDMLAH:
9ee6e8bb 6150 switch (size) {
dd8fbd78
FN
6151 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
6152 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
6153 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 6154 default: abort();
9ee6e8bb
PB
6155 }
6156 break;
62698be3 6157 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
6158 {
6159 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
6160 switch ((u << 2) | size) {
6161 case 0: /* VADD */
aa47cfdd
PM
6162 case 4: /* VPADD */
6163 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6164 break;
6165 case 2: /* VSUB */
aa47cfdd 6166 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6167 break;
6168 case 6: /* VABD */
aa47cfdd 6169 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6170 break;
6171 default:
62698be3 6172 abort();
9ee6e8bb 6173 }
aa47cfdd 6174 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6175 break;
aa47cfdd 6176 }
62698be3 6177 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
6178 {
6179 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6180 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6181 if (!u) {
7d1b0095 6182 tcg_temp_free_i32(tmp2);
dd8fbd78 6183 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6184 if (size == 0) {
aa47cfdd 6185 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6186 } else {
aa47cfdd 6187 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
6188 }
6189 }
aa47cfdd 6190 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6191 break;
aa47cfdd 6192 }
62698be3 6193 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
6194 {
6195 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 6196 if (!u) {
aa47cfdd 6197 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 6198 } else {
aa47cfdd
PM
6199 if (size == 0) {
6200 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6201 } else {
6202 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6203 }
b5ff1b31 6204 }
aa47cfdd 6205 tcg_temp_free_ptr(fpstatus);
2c0262af 6206 break;
aa47cfdd 6207 }
62698be3 6208 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
6209 {
6210 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6211 if (size == 0) {
6212 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6213 } else {
6214 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6215 }
6216 tcg_temp_free_ptr(fpstatus);
2c0262af 6217 break;
aa47cfdd 6218 }
62698be3 6219 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
6220 {
6221 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6222 if (size == 0) {
f71a2ae5 6223 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 6224 } else {
f71a2ae5 6225 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
6226 }
6227 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6228 break;
aa47cfdd 6229 }
505935fc
WN
6230 case NEON_3R_FLOAT_MISC:
6231 if (u) {
6232 /* VMAXNM/VMINNM */
6233 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6234 if (size == 0) {
f71a2ae5 6235 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 6236 } else {
f71a2ae5 6237 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
6238 }
6239 tcg_temp_free_ptr(fpstatus);
6240 } else {
6241 if (size == 0) {
6242 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6243 } else {
6244 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6245 }
6246 }
2c0262af 6247 break;
36a71934 6248 case NEON_3R_VFM_VQRDMLSH:
da97f52c
PM
6249 {
6250 /* VFMA, VFMS: fused multiply-add */
6251 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6252 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6253 if (size) {
6254 /* VFMS */
6255 gen_helper_vfp_negs(tmp, tmp);
6256 }
6257 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6258 tcg_temp_free_i32(tmp3);
6259 tcg_temp_free_ptr(fpstatus);
6260 break;
6261 }
9ee6e8bb
PB
6262 default:
6263 abort();
2c0262af 6264 }
7d1b0095 6265 tcg_temp_free_i32(tmp2);
dd8fbd78 6266
9ee6e8bb
PB
6267 /* Save the result. For elementwise operations we can put it
6268 straight into the destination register. For pairwise operations
6269 we have to be careful to avoid clobbering the source operands. */
6270 if (pairwise && rd == rm) {
dd8fbd78 6271 neon_store_scratch(pass, tmp);
9ee6e8bb 6272 } else {
dd8fbd78 6273 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6274 }
6275
6276 } /* for pass */
6277 if (pairwise && rd == rm) {
6278 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
6279 tmp = neon_load_scratch(pass);
6280 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6281 }
6282 }
ad69471c 6283 /* End of 3 register same size operations. */
9ee6e8bb
PB
6284 } else if (insn & (1 << 4)) {
6285 if ((insn & 0x00380080) != 0) {
6286 /* Two registers and shift. */
6287 op = (insn >> 8) & 0xf;
6288 if (insn & (1 << 7)) {
cc13115b
PM
6289 /* 64-bit shift. */
6290 if (op > 7) {
6291 return 1;
6292 }
9ee6e8bb
PB
6293 size = 3;
6294 } else {
6295 size = 2;
6296 while ((insn & (1 << (size + 19))) == 0)
6297 size--;
6298 }
6299 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6300 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6301 by immediate using the variable shift operations. */
6302 if (op < 8) {
6303 /* Shift by immediate:
6304 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6305 if (q && ((rd | rm) & 1)) {
6306 return 1;
6307 }
6308 if (!u && (op == 4 || op == 6)) {
6309 return 1;
6310 }
9ee6e8bb
PB
6311 /* Right shifts are encoded as N - shift, where N is the
6312 element size in bits. */
6313 if (op <= 4)
6314 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6315 if (size == 3) {
6316 count = q + 1;
6317 } else {
6318 count = q ? 4: 2;
6319 }
6320 switch (size) {
6321 case 0:
6322 imm = (uint8_t) shift;
6323 imm |= imm << 8;
6324 imm |= imm << 16;
6325 break;
6326 case 1:
6327 imm = (uint16_t) shift;
6328 imm |= imm << 16;
6329 break;
6330 case 2:
6331 case 3:
6332 imm = shift;
6333 break;
6334 default:
6335 abort();
6336 }
6337
6338 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6339 if (size == 3) {
6340 neon_load_reg64(cpu_V0, rm + pass);
6341 tcg_gen_movi_i64(cpu_V1, imm);
6342 switch (op) {
6343 case 0: /* VSHR */
6344 case 1: /* VSRA */
6345 if (u)
6346 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6347 else
ad69471c 6348 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6349 break;
ad69471c
PB
6350 case 2: /* VRSHR */
6351 case 3: /* VRSRA */
6352 if (u)
6353 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6354 else
ad69471c 6355 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6356 break;
ad69471c 6357 case 4: /* VSRI */
ad69471c
PB
6358 case 5: /* VSHL, VSLI */
6359 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6360 break;
0322b26e 6361 case 6: /* VQSHLU */
02da0b2d
PM
6362 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6363 cpu_V0, cpu_V1);
ad69471c 6364 break;
0322b26e
PM
6365 case 7: /* VQSHL */
6366 if (u) {
02da0b2d 6367 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6368 cpu_V0, cpu_V1);
6369 } else {
02da0b2d 6370 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6371 cpu_V0, cpu_V1);
6372 }
9ee6e8bb 6373 break;
9ee6e8bb 6374 }
ad69471c
PB
6375 if (op == 1 || op == 3) {
6376 /* Accumulate. */
5371cb81 6377 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6378 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6379 } else if (op == 4 || (op == 5 && u)) {
6380 /* Insert */
923e6509
CL
6381 neon_load_reg64(cpu_V1, rd + pass);
6382 uint64_t mask;
6383 if (shift < -63 || shift > 63) {
6384 mask = 0;
6385 } else {
6386 if (op == 4) {
6387 mask = 0xffffffffffffffffull >> -shift;
6388 } else {
6389 mask = 0xffffffffffffffffull << shift;
6390 }
6391 }
6392 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6393 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6394 }
6395 neon_store_reg64(cpu_V0, rd + pass);
6396 } else { /* size < 3 */
6397 /* Operands in T0 and T1. */
dd8fbd78 6398 tmp = neon_load_reg(rm, pass);
7d1b0095 6399 tmp2 = tcg_temp_new_i32();
dd8fbd78 6400 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6401 switch (op) {
6402 case 0: /* VSHR */
6403 case 1: /* VSRA */
6404 GEN_NEON_INTEGER_OP(shl);
6405 break;
6406 case 2: /* VRSHR */
6407 case 3: /* VRSRA */
6408 GEN_NEON_INTEGER_OP(rshl);
6409 break;
6410 case 4: /* VSRI */
ad69471c
PB
6411 case 5: /* VSHL, VSLI */
6412 switch (size) {
dd8fbd78
FN
6413 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6414 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6415 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6416 default: abort();
ad69471c
PB
6417 }
6418 break;
0322b26e 6419 case 6: /* VQSHLU */
ad69471c 6420 switch (size) {
0322b26e 6421 case 0:
02da0b2d
PM
6422 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6423 tmp, tmp2);
0322b26e
PM
6424 break;
6425 case 1:
02da0b2d
PM
6426 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6427 tmp, tmp2);
0322b26e
PM
6428 break;
6429 case 2:
02da0b2d
PM
6430 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6431 tmp, tmp2);
0322b26e
PM
6432 break;
6433 default:
cc13115b 6434 abort();
ad69471c
PB
6435 }
6436 break;
0322b26e 6437 case 7: /* VQSHL */
02da0b2d 6438 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6439 break;
ad69471c 6440 }
7d1b0095 6441 tcg_temp_free_i32(tmp2);
ad69471c
PB
6442
6443 if (op == 1 || op == 3) {
6444 /* Accumulate. */
dd8fbd78 6445 tmp2 = neon_load_reg(rd, pass);
5371cb81 6446 gen_neon_add(size, tmp, tmp2);
7d1b0095 6447 tcg_temp_free_i32(tmp2);
ad69471c
PB
6448 } else if (op == 4 || (op == 5 && u)) {
6449 /* Insert */
6450 switch (size) {
6451 case 0:
6452 if (op == 4)
ca9a32e4 6453 mask = 0xff >> -shift;
ad69471c 6454 else
ca9a32e4
JR
6455 mask = (uint8_t)(0xff << shift);
6456 mask |= mask << 8;
6457 mask |= mask << 16;
ad69471c
PB
6458 break;
6459 case 1:
6460 if (op == 4)
ca9a32e4 6461 mask = 0xffff >> -shift;
ad69471c 6462 else
ca9a32e4
JR
6463 mask = (uint16_t)(0xffff << shift);
6464 mask |= mask << 16;
ad69471c
PB
6465 break;
6466 case 2:
ca9a32e4
JR
6467 if (shift < -31 || shift > 31) {
6468 mask = 0;
6469 } else {
6470 if (op == 4)
6471 mask = 0xffffffffu >> -shift;
6472 else
6473 mask = 0xffffffffu << shift;
6474 }
ad69471c
PB
6475 break;
6476 default:
6477 abort();
6478 }
dd8fbd78 6479 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6480 tcg_gen_andi_i32(tmp, tmp, mask);
6481 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6482 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6483 tcg_temp_free_i32(tmp2);
ad69471c 6484 }
dd8fbd78 6485 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6486 }
6487 } /* for pass */
6488 } else if (op < 10) {
ad69471c 6489 /* Shift by immediate and narrow:
9ee6e8bb 6490 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6491 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6492 if (rm & 1) {
6493 return 1;
6494 }
9ee6e8bb
PB
6495 shift = shift - (1 << (size + 3));
6496 size++;
92cdfaeb 6497 if (size == 3) {
a7812ae4 6498 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6499 neon_load_reg64(cpu_V0, rm);
6500 neon_load_reg64(cpu_V1, rm + 1);
6501 for (pass = 0; pass < 2; pass++) {
6502 TCGv_i64 in;
6503 if (pass == 0) {
6504 in = cpu_V0;
6505 } else {
6506 in = cpu_V1;
6507 }
ad69471c 6508 if (q) {
0b36f4cd 6509 if (input_unsigned) {
92cdfaeb 6510 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6511 } else {
92cdfaeb 6512 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6513 }
ad69471c 6514 } else {
0b36f4cd 6515 if (input_unsigned) {
92cdfaeb 6516 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6517 } else {
92cdfaeb 6518 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6519 }
ad69471c 6520 }
7d1b0095 6521 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6522 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6523 neon_store_reg(rd, pass, tmp);
6524 } /* for pass */
6525 tcg_temp_free_i64(tmp64);
6526 } else {
6527 if (size == 1) {
6528 imm = (uint16_t)shift;
6529 imm |= imm << 16;
2c0262af 6530 } else {
92cdfaeb
PM
6531 /* size == 2 */
6532 imm = (uint32_t)shift;
6533 }
6534 tmp2 = tcg_const_i32(imm);
6535 tmp4 = neon_load_reg(rm + 1, 0);
6536 tmp5 = neon_load_reg(rm + 1, 1);
6537 for (pass = 0; pass < 2; pass++) {
6538 if (pass == 0) {
6539 tmp = neon_load_reg(rm, 0);
6540 } else {
6541 tmp = tmp4;
6542 }
0b36f4cd
CL
6543 gen_neon_shift_narrow(size, tmp, tmp2, q,
6544 input_unsigned);
92cdfaeb
PM
6545 if (pass == 0) {
6546 tmp3 = neon_load_reg(rm, 1);
6547 } else {
6548 tmp3 = tmp5;
6549 }
0b36f4cd
CL
6550 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6551 input_unsigned);
36aa55dc 6552 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6553 tcg_temp_free_i32(tmp);
6554 tcg_temp_free_i32(tmp3);
6555 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6556 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6557 neon_store_reg(rd, pass, tmp);
6558 } /* for pass */
c6067f04 6559 tcg_temp_free_i32(tmp2);
b75263d6 6560 }
9ee6e8bb 6561 } else if (op == 10) {
cc13115b
PM
6562 /* VSHLL, VMOVL */
6563 if (q || (rd & 1)) {
9ee6e8bb 6564 return 1;
cc13115b 6565 }
ad69471c
PB
6566 tmp = neon_load_reg(rm, 0);
6567 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6568 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6569 if (pass == 1)
6570 tmp = tmp2;
6571
6572 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6573
9ee6e8bb
PB
6574 if (shift != 0) {
6575 /* The shift is less than the width of the source
ad69471c
PB
6576 type, so we can just shift the whole register. */
6577 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6578 /* Widen the result of shift: we need to clear
6579 * the potential overflow bits resulting from
6580 * left bits of the narrow input appearing as
6581 * right bits of left the neighbour narrow
6582 * input. */
ad69471c
PB
6583 if (size < 2 || !u) {
6584 uint64_t imm64;
6585 if (size == 0) {
6586 imm = (0xffu >> (8 - shift));
6587 imm |= imm << 16;
acdf01ef 6588 } else if (size == 1) {
ad69471c 6589 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6590 } else {
6591 /* size == 2 */
6592 imm = 0xffffffff >> (32 - shift);
6593 }
6594 if (size < 2) {
6595 imm64 = imm | (((uint64_t)imm) << 32);
6596 } else {
6597 imm64 = imm;
9ee6e8bb 6598 }
acdf01ef 6599 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6600 }
6601 }
ad69471c 6602 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6603 }
f73534a5 6604 } else if (op >= 14) {
9ee6e8bb 6605 /* VCVT fixed-point. */
cc13115b
PM
6606 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6607 return 1;
6608 }
f73534a5
PM
6609 /* We have already masked out the must-be-1 top bit of imm6,
6610 * hence this 32-shift where the ARM ARM has 64-imm6.
6611 */
6612 shift = 32 - shift;
9ee6e8bb 6613 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6614 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6615 if (!(op & 1)) {
9ee6e8bb 6616 if (u)
5500b06c 6617 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6618 else
5500b06c 6619 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6620 } else {
6621 if (u)
5500b06c 6622 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6623 else
5500b06c 6624 gen_vfp_tosl(0, shift, 1);
2c0262af 6625 }
4373f3ce 6626 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6627 }
6628 } else {
9ee6e8bb
PB
6629 return 1;
6630 }
6631 } else { /* (insn & 0x00380080) == 0 */
6632 int invert;
7d80fee5
PM
6633 if (q && (rd & 1)) {
6634 return 1;
6635 }
9ee6e8bb
PB
6636
6637 op = (insn >> 8) & 0xf;
6638 /* One register and immediate. */
6639 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6640 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6641 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6642 * We choose to not special-case this and will behave as if a
6643 * valid constant encoding of 0 had been given.
6644 */
9ee6e8bb
PB
6645 switch (op) {
6646 case 0: case 1:
6647 /* no-op */
6648 break;
6649 case 2: case 3:
6650 imm <<= 8;
6651 break;
6652 case 4: case 5:
6653 imm <<= 16;
6654 break;
6655 case 6: case 7:
6656 imm <<= 24;
6657 break;
6658 case 8: case 9:
6659 imm |= imm << 16;
6660 break;
6661 case 10: case 11:
6662 imm = (imm << 8) | (imm << 24);
6663 break;
6664 case 12:
8e31209e 6665 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6666 break;
6667 case 13:
6668 imm = (imm << 16) | 0xffff;
6669 break;
6670 case 14:
6671 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6672 if (invert)
6673 imm = ~imm;
6674 break;
6675 case 15:
7d80fee5
PM
6676 if (invert) {
6677 return 1;
6678 }
9ee6e8bb
PB
6679 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6680 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6681 break;
6682 }
6683 if (invert)
6684 imm = ~imm;
6685
9ee6e8bb
PB
6686 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6687 if (op & 1 && op < 12) {
ad69471c 6688 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6689 if (invert) {
6690 /* The immediate value has already been inverted, so
6691 BIC becomes AND. */
ad69471c 6692 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6693 } else {
ad69471c 6694 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6695 }
9ee6e8bb 6696 } else {
ad69471c 6697 /* VMOV, VMVN. */
7d1b0095 6698 tmp = tcg_temp_new_i32();
9ee6e8bb 6699 if (op == 14 && invert) {
a5a14945 6700 int n;
ad69471c
PB
6701 uint32_t val;
6702 val = 0;
9ee6e8bb
PB
6703 for (n = 0; n < 4; n++) {
6704 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6705 val |= 0xff << (n * 8);
9ee6e8bb 6706 }
ad69471c
PB
6707 tcg_gen_movi_i32(tmp, val);
6708 } else {
6709 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6710 }
9ee6e8bb 6711 }
ad69471c 6712 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6713 }
6714 }
e4b3861d 6715 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6716 if (size != 3) {
6717 op = (insn >> 8) & 0xf;
6718 if ((insn & (1 << 6)) == 0) {
6719 /* Three registers of different lengths. */
6720 int src1_wide;
6721 int src2_wide;
6722 int prewiden;
526d0096
PM
6723 /* undefreq: bit 0 : UNDEF if size == 0
6724 * bit 1 : UNDEF if size == 1
6725 * bit 2 : UNDEF if size == 2
6726 * bit 3 : UNDEF if U == 1
6727 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6728 */
6729 int undefreq;
6730 /* prewiden, src1_wide, src2_wide, undefreq */
6731 static const int neon_3reg_wide[16][4] = {
6732 {1, 0, 0, 0}, /* VADDL */
6733 {1, 1, 0, 0}, /* VADDW */
6734 {1, 0, 0, 0}, /* VSUBL */
6735 {1, 1, 0, 0}, /* VSUBW */
6736 {0, 1, 1, 0}, /* VADDHN */
6737 {0, 0, 0, 0}, /* VABAL */
6738 {0, 1, 1, 0}, /* VSUBHN */
6739 {0, 0, 0, 0}, /* VABDL */
6740 {0, 0, 0, 0}, /* VMLAL */
526d0096 6741 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6742 {0, 0, 0, 0}, /* VMLSL */
526d0096 6743 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6744 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6745 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6746 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6747 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6748 };
6749
6750 prewiden = neon_3reg_wide[op][0];
6751 src1_wide = neon_3reg_wide[op][1];
6752 src2_wide = neon_3reg_wide[op][2];
695272dc 6753 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6754
526d0096
PM
6755 if ((undefreq & (1 << size)) ||
6756 ((undefreq & 8) && u)) {
695272dc
PM
6757 return 1;
6758 }
6759 if ((src1_wide && (rn & 1)) ||
6760 (src2_wide && (rm & 1)) ||
6761 (!src2_wide && (rd & 1))) {
ad69471c 6762 return 1;
695272dc 6763 }
ad69471c 6764
4e624eda
PM
6765 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6766 * outside the loop below as it only performs a single pass.
6767 */
6768 if (op == 14 && size == 2) {
6769 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6770
962fcbf2 6771 if (!dc_isar_feature(aa32_pmull, s)) {
4e624eda
PM
6772 return 1;
6773 }
6774 tcg_rn = tcg_temp_new_i64();
6775 tcg_rm = tcg_temp_new_i64();
6776 tcg_rd = tcg_temp_new_i64();
6777 neon_load_reg64(tcg_rn, rn);
6778 neon_load_reg64(tcg_rm, rm);
6779 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6780 neon_store_reg64(tcg_rd, rd);
6781 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6782 neon_store_reg64(tcg_rd, rd + 1);
6783 tcg_temp_free_i64(tcg_rn);
6784 tcg_temp_free_i64(tcg_rm);
6785 tcg_temp_free_i64(tcg_rd);
6786 return 0;
6787 }
6788
9ee6e8bb
PB
6789 /* Avoid overlapping operands. Wide source operands are
6790 always aligned so will never overlap with wide
6791 destinations in problematic ways. */
8f8e3aa4 6792 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6793 tmp = neon_load_reg(rm, 1);
6794 neon_store_scratch(2, tmp);
8f8e3aa4 6795 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6796 tmp = neon_load_reg(rn, 1);
6797 neon_store_scratch(2, tmp);
9ee6e8bb 6798 }
f764718d 6799 tmp3 = NULL;
9ee6e8bb 6800 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6801 if (src1_wide) {
6802 neon_load_reg64(cpu_V0, rn + pass);
f764718d 6803 tmp = NULL;
9ee6e8bb 6804 } else {
ad69471c 6805 if (pass == 1 && rd == rn) {
dd8fbd78 6806 tmp = neon_load_scratch(2);
9ee6e8bb 6807 } else {
ad69471c
PB
6808 tmp = neon_load_reg(rn, pass);
6809 }
6810 if (prewiden) {
6811 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6812 }
6813 }
ad69471c
PB
6814 if (src2_wide) {
6815 neon_load_reg64(cpu_V1, rm + pass);
f764718d 6816 tmp2 = NULL;
9ee6e8bb 6817 } else {
ad69471c 6818 if (pass == 1 && rd == rm) {
dd8fbd78 6819 tmp2 = neon_load_scratch(2);
9ee6e8bb 6820 } else {
ad69471c
PB
6821 tmp2 = neon_load_reg(rm, pass);
6822 }
6823 if (prewiden) {
6824 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6825 }
9ee6e8bb
PB
6826 }
6827 switch (op) {
6828 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6829 gen_neon_addl(size);
9ee6e8bb 6830 break;
79b0e534 6831 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6832 gen_neon_subl(size);
9ee6e8bb
PB
6833 break;
6834 case 5: case 7: /* VABAL, VABDL */
6835 switch ((size << 1) | u) {
ad69471c
PB
6836 case 0:
6837 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6838 break;
6839 case 1:
6840 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6841 break;
6842 case 2:
6843 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6844 break;
6845 case 3:
6846 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6847 break;
6848 case 4:
6849 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6850 break;
6851 case 5:
6852 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6853 break;
9ee6e8bb
PB
6854 default: abort();
6855 }
7d1b0095
PM
6856 tcg_temp_free_i32(tmp2);
6857 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6858 break;
6859 case 8: case 9: case 10: case 11: case 12: case 13:
6860 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6861 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6862 break;
6863 case 14: /* Polynomial VMULL */
e5ca24cb 6864 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6865 tcg_temp_free_i32(tmp2);
6866 tcg_temp_free_i32(tmp);
e5ca24cb 6867 break;
695272dc
PM
6868 default: /* 15 is RESERVED: caught earlier */
6869 abort();
9ee6e8bb 6870 }
ebcd88ce
PM
6871 if (op == 13) {
6872 /* VQDMULL */
6873 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6874 neon_store_reg64(cpu_V0, rd + pass);
6875 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6876 /* Accumulate. */
ebcd88ce 6877 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6878 switch (op) {
4dc064e6
PM
6879 case 10: /* VMLSL */
6880 gen_neon_negl(cpu_V0, size);
6881 /* Fall through */
6882 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6883 gen_neon_addl(size);
9ee6e8bb
PB
6884 break;
6885 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6886 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6887 if (op == 11) {
6888 gen_neon_negl(cpu_V0, size);
6889 }
ad69471c
PB
6890 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6891 break;
9ee6e8bb
PB
6892 default:
6893 abort();
6894 }
ad69471c 6895 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6896 } else if (op == 4 || op == 6) {
6897 /* Narrowing operation. */
7d1b0095 6898 tmp = tcg_temp_new_i32();
79b0e534 6899 if (!u) {
9ee6e8bb 6900 switch (size) {
ad69471c
PB
6901 case 0:
6902 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6903 break;
6904 case 1:
6905 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6906 break;
6907 case 2:
6908 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6909 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6910 break;
9ee6e8bb
PB
6911 default: abort();
6912 }
6913 } else {
6914 switch (size) {
ad69471c
PB
6915 case 0:
6916 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6917 break;
6918 case 1:
6919 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6920 break;
6921 case 2:
6922 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6923 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6924 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6925 break;
9ee6e8bb
PB
6926 default: abort();
6927 }
6928 }
ad69471c
PB
6929 if (pass == 0) {
6930 tmp3 = tmp;
6931 } else {
6932 neon_store_reg(rd, 0, tmp3);
6933 neon_store_reg(rd, 1, tmp);
6934 }
9ee6e8bb
PB
6935 } else {
6936 /* Write back the result. */
ad69471c 6937 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6938 }
6939 }
6940 } else {
3e3326df
PM
6941 /* Two registers and a scalar. NB that for ops of this form
6942 * the ARM ARM labels bit 24 as Q, but it is in our variable
6943 * 'u', not 'q'.
6944 */
6945 if (size == 0) {
6946 return 1;
6947 }
9ee6e8bb 6948 switch (op) {
9ee6e8bb 6949 case 1: /* Float VMLA scalar */
9ee6e8bb 6950 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6951 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6952 if (size == 1) {
6953 return 1;
6954 }
6955 /* fall through */
6956 case 0: /* Integer VMLA scalar */
6957 case 4: /* Integer VMLS scalar */
6958 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6959 case 12: /* VQDMULH scalar */
6960 case 13: /* VQRDMULH scalar */
3e3326df
PM
6961 if (u && ((rd | rn) & 1)) {
6962 return 1;
6963 }
dd8fbd78
FN
6964 tmp = neon_get_scalar(size, rm);
6965 neon_store_scratch(0, tmp);
9ee6e8bb 6966 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6967 tmp = neon_load_scratch(0);
6968 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6969 if (op == 12) {
6970 if (size == 1) {
02da0b2d 6971 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6972 } else {
02da0b2d 6973 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6974 }
6975 } else if (op == 13) {
6976 if (size == 1) {
02da0b2d 6977 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6978 } else {
02da0b2d 6979 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6980 }
6981 } else if (op & 1) {
aa47cfdd
PM
6982 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6983 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6984 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6985 } else {
6986 switch (size) {
dd8fbd78
FN
6987 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6988 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6989 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6990 default: abort();
9ee6e8bb
PB
6991 }
6992 }
7d1b0095 6993 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6994 if (op < 8) {
6995 /* Accumulate. */
dd8fbd78 6996 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6997 switch (op) {
6998 case 0:
dd8fbd78 6999 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
7000 break;
7001 case 1:
aa47cfdd
PM
7002 {
7003 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7004 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
7005 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7006 break;
aa47cfdd 7007 }
9ee6e8bb 7008 case 4:
dd8fbd78 7009 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
7010 break;
7011 case 5:
aa47cfdd
PM
7012 {
7013 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7014 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
7015 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7016 break;
aa47cfdd 7017 }
9ee6e8bb
PB
7018 default:
7019 abort();
7020 }
7d1b0095 7021 tcg_temp_free_i32(tmp2);
9ee6e8bb 7022 }
dd8fbd78 7023 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7024 }
7025 break;
9ee6e8bb 7026 case 3: /* VQDMLAL scalar */
9ee6e8bb 7027 case 7: /* VQDMLSL scalar */
9ee6e8bb 7028 case 11: /* VQDMULL scalar */
3e3326df 7029 if (u == 1) {
ad69471c 7030 return 1;
3e3326df
PM
7031 }
7032 /* fall through */
7033 case 2: /* VMLAL sclar */
7034 case 6: /* VMLSL scalar */
7035 case 10: /* VMULL scalar */
7036 if (rd & 1) {
7037 return 1;
7038 }
dd8fbd78 7039 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
7040 /* We need a copy of tmp2 because gen_neon_mull
7041 * deletes it during pass 0. */
7d1b0095 7042 tmp4 = tcg_temp_new_i32();
c6067f04 7043 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 7044 tmp3 = neon_load_reg(rn, 1);
ad69471c 7045
9ee6e8bb 7046 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7047 if (pass == 0) {
7048 tmp = neon_load_reg(rn, 0);
9ee6e8bb 7049 } else {
dd8fbd78 7050 tmp = tmp3;
c6067f04 7051 tmp2 = tmp4;
9ee6e8bb 7052 }
ad69471c 7053 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
7054 if (op != 11) {
7055 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7056 }
9ee6e8bb 7057 switch (op) {
4dc064e6
PM
7058 case 6:
7059 gen_neon_negl(cpu_V0, size);
7060 /* Fall through */
7061 case 2:
ad69471c 7062 gen_neon_addl(size);
9ee6e8bb
PB
7063 break;
7064 case 3: case 7:
ad69471c 7065 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7066 if (op == 7) {
7067 gen_neon_negl(cpu_V0, size);
7068 }
ad69471c 7069 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
7070 break;
7071 case 10:
7072 /* no-op */
7073 break;
7074 case 11:
ad69471c 7075 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
7076 break;
7077 default:
7078 abort();
7079 }
ad69471c 7080 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 7081 }
61adacc8
RH
7082 break;
7083 case 14: /* VQRDMLAH scalar */
7084 case 15: /* VQRDMLSH scalar */
7085 {
7086 NeonGenThreeOpEnvFn *fn;
dd8fbd78 7087
962fcbf2 7088 if (!dc_isar_feature(aa32_rdm, s)) {
61adacc8
RH
7089 return 1;
7090 }
7091 if (u && ((rd | rn) & 1)) {
7092 return 1;
7093 }
7094 if (op == 14) {
7095 if (size == 1) {
7096 fn = gen_helper_neon_qrdmlah_s16;
7097 } else {
7098 fn = gen_helper_neon_qrdmlah_s32;
7099 }
7100 } else {
7101 if (size == 1) {
7102 fn = gen_helper_neon_qrdmlsh_s16;
7103 } else {
7104 fn = gen_helper_neon_qrdmlsh_s32;
7105 }
7106 }
dd8fbd78 7107
61adacc8
RH
7108 tmp2 = neon_get_scalar(size, rm);
7109 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7110 tmp = neon_load_reg(rn, pass);
7111 tmp3 = neon_load_reg(rd, pass);
7112 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7113 tcg_temp_free_i32(tmp3);
7114 neon_store_reg(rd, pass, tmp);
7115 }
7116 tcg_temp_free_i32(tmp2);
7117 }
9ee6e8bb 7118 break;
61adacc8
RH
7119 default:
7120 g_assert_not_reached();
9ee6e8bb
PB
7121 }
7122 }
7123 } else { /* size == 3 */
7124 if (!u) {
7125 /* Extract. */
9ee6e8bb 7126 imm = (insn >> 8) & 0xf;
ad69471c
PB
7127
7128 if (imm > 7 && !q)
7129 return 1;
7130
52579ea1
PM
7131 if (q && ((rd | rn | rm) & 1)) {
7132 return 1;
7133 }
7134
ad69471c
PB
7135 if (imm == 0) {
7136 neon_load_reg64(cpu_V0, rn);
7137 if (q) {
7138 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 7139 }
ad69471c
PB
7140 } else if (imm == 8) {
7141 neon_load_reg64(cpu_V0, rn + 1);
7142 if (q) {
7143 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7144 }
ad69471c 7145 } else if (q) {
a7812ae4 7146 tmp64 = tcg_temp_new_i64();
ad69471c
PB
7147 if (imm < 8) {
7148 neon_load_reg64(cpu_V0, rn);
a7812ae4 7149 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
7150 } else {
7151 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 7152 neon_load_reg64(tmp64, rm);
ad69471c
PB
7153 }
7154 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 7155 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
7156 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7157 if (imm < 8) {
7158 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7159 } else {
ad69471c
PB
7160 neon_load_reg64(cpu_V1, rm + 1);
7161 imm -= 8;
9ee6e8bb 7162 }
ad69471c 7163 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
7164 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7165 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 7166 tcg_temp_free_i64(tmp64);
ad69471c 7167 } else {
a7812ae4 7168 /* BUGFIX */
ad69471c 7169 neon_load_reg64(cpu_V0, rn);
a7812ae4 7170 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 7171 neon_load_reg64(cpu_V1, rm);
a7812ae4 7172 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
7173 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7174 }
7175 neon_store_reg64(cpu_V0, rd);
7176 if (q) {
7177 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
7178 }
7179 } else if ((insn & (1 << 11)) == 0) {
7180 /* Two register misc. */
7181 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7182 size = (insn >> 18) & 3;
600b828c
PM
7183 /* UNDEF for unknown op values and bad op-size combinations */
7184 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7185 return 1;
7186 }
fe8fcf3d
PM
7187 if (neon_2rm_is_v8_op(op) &&
7188 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7189 return 1;
7190 }
fc2a9b37
PM
7191 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7192 q && ((rm | rd) & 1)) {
7193 return 1;
7194 }
9ee6e8bb 7195 switch (op) {
600b828c 7196 case NEON_2RM_VREV64:
9ee6e8bb 7197 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7198 tmp = neon_load_reg(rm, pass * 2);
7199 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7200 switch (size) {
dd8fbd78
FN
7201 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7202 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7203 case 2: /* no-op */ break;
7204 default: abort();
7205 }
dd8fbd78 7206 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7207 if (size == 2) {
dd8fbd78 7208 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7209 } else {
9ee6e8bb 7210 switch (size) {
dd8fbd78
FN
7211 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7212 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7213 default: abort();
7214 }
dd8fbd78 7215 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7216 }
7217 }
7218 break;
600b828c
PM
7219 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7220 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7221 for (pass = 0; pass < q + 1; pass++) {
7222 tmp = neon_load_reg(rm, pass * 2);
7223 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7224 tmp = neon_load_reg(rm, pass * 2 + 1);
7225 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7226 switch (size) {
7227 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7228 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7229 case 2: tcg_gen_add_i64(CPU_V001); break;
7230 default: abort();
7231 }
600b828c 7232 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7233 /* Accumulate. */
ad69471c
PB
7234 neon_load_reg64(cpu_V1, rd + pass);
7235 gen_neon_addl(size);
9ee6e8bb 7236 }
ad69471c 7237 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7238 }
7239 break;
600b828c 7240 case NEON_2RM_VTRN:
9ee6e8bb 7241 if (size == 2) {
a5a14945 7242 int n;
9ee6e8bb 7243 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7244 tmp = neon_load_reg(rm, n);
7245 tmp2 = neon_load_reg(rd, n + 1);
7246 neon_store_reg(rm, n, tmp2);
7247 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7248 }
7249 } else {
7250 goto elementwise;
7251 }
7252 break;
600b828c 7253 case NEON_2RM_VUZP:
02acedf9 7254 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7255 return 1;
9ee6e8bb
PB
7256 }
7257 break;
600b828c 7258 case NEON_2RM_VZIP:
d68a6f3a 7259 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7260 return 1;
9ee6e8bb
PB
7261 }
7262 break;
600b828c
PM
7263 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7264 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7265 if (rm & 1) {
7266 return 1;
7267 }
f764718d 7268 tmp2 = NULL;
9ee6e8bb 7269 for (pass = 0; pass < 2; pass++) {
ad69471c 7270 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7271 tmp = tcg_temp_new_i32();
600b828c
PM
7272 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7273 tmp, cpu_V0);
ad69471c
PB
7274 if (pass == 0) {
7275 tmp2 = tmp;
7276 } else {
7277 neon_store_reg(rd, 0, tmp2);
7278 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7279 }
9ee6e8bb
PB
7280 }
7281 break;
600b828c 7282 case NEON_2RM_VSHLL:
fc2a9b37 7283 if (q || (rd & 1)) {
9ee6e8bb 7284 return 1;
600b828c 7285 }
ad69471c
PB
7286 tmp = neon_load_reg(rm, 0);
7287 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7288 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7289 if (pass == 1)
7290 tmp = tmp2;
7291 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7292 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7293 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7294 }
7295 break;
600b828c 7296 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
7297 {
7298 TCGv_ptr fpst;
7299 TCGv_i32 ahp;
7300
d614a513 7301 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7302 q || (rm & 1)) {
7303 return 1;
7304 }
7d1b0095
PM
7305 tmp = tcg_temp_new_i32();
7306 tmp2 = tcg_temp_new_i32();
486624fc
AB
7307 fpst = get_fpstatus_ptr(true);
7308 ahp = get_ahp_flag();
60011498 7309 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
486624fc 7310 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498 7311 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
486624fc 7312 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7313 tcg_gen_shli_i32(tmp2, tmp2, 16);
7314 tcg_gen_or_i32(tmp2, tmp2, tmp);
7315 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
486624fc 7316 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498
PB
7317 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7318 neon_store_reg(rd, 0, tmp2);
7d1b0095 7319 tmp2 = tcg_temp_new_i32();
486624fc 7320 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7321 tcg_gen_shli_i32(tmp2, tmp2, 16);
7322 tcg_gen_or_i32(tmp2, tmp2, tmp);
7323 neon_store_reg(rd, 1, tmp2);
7d1b0095 7324 tcg_temp_free_i32(tmp);
486624fc
AB
7325 tcg_temp_free_i32(ahp);
7326 tcg_temp_free_ptr(fpst);
60011498 7327 break;
486624fc 7328 }
600b828c 7329 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
7330 {
7331 TCGv_ptr fpst;
7332 TCGv_i32 ahp;
d614a513 7333 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7334 q || (rd & 1)) {
7335 return 1;
7336 }
486624fc
AB
7337 fpst = get_fpstatus_ptr(true);
7338 ahp = get_ahp_flag();
7d1b0095 7339 tmp3 = tcg_temp_new_i32();
60011498
PB
7340 tmp = neon_load_reg(rm, 0);
7341 tmp2 = neon_load_reg(rm, 1);
7342 tcg_gen_ext16u_i32(tmp3, tmp);
486624fc 7343 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7344 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7345 tcg_gen_shri_i32(tmp3, tmp, 16);
486624fc 7346 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7347 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7348 tcg_temp_free_i32(tmp);
60011498 7349 tcg_gen_ext16u_i32(tmp3, tmp2);
486624fc 7350 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7351 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7352 tcg_gen_shri_i32(tmp3, tmp2, 16);
486624fc 7353 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7354 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7355 tcg_temp_free_i32(tmp2);
7356 tcg_temp_free_i32(tmp3);
486624fc
AB
7357 tcg_temp_free_i32(ahp);
7358 tcg_temp_free_ptr(fpst);
60011498 7359 break;
486624fc 7360 }
9d935509 7361 case NEON_2RM_AESE: case NEON_2RM_AESMC:
962fcbf2 7362 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
9d935509
AB
7363 return 1;
7364 }
1a66ac61
RH
7365 ptr1 = vfp_reg_ptr(true, rd);
7366 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
7367
7368 /* Bit 6 is the lowest opcode bit; it distinguishes between
7369 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7370 */
7371 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7372
7373 if (op == NEON_2RM_AESE) {
1a66ac61 7374 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 7375 } else {
1a66ac61 7376 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 7377 }
1a66ac61
RH
7378 tcg_temp_free_ptr(ptr1);
7379 tcg_temp_free_ptr(ptr2);
9d935509
AB
7380 tcg_temp_free_i32(tmp3);
7381 break;
f1ecb913 7382 case NEON_2RM_SHA1H:
962fcbf2 7383 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
f1ecb913
AB
7384 return 1;
7385 }
1a66ac61
RH
7386 ptr1 = vfp_reg_ptr(true, rd);
7387 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7388
1a66ac61 7389 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 7390
1a66ac61
RH
7391 tcg_temp_free_ptr(ptr1);
7392 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
7393 break;
7394 case NEON_2RM_SHA1SU1:
7395 if ((rm | rd) & 1) {
7396 return 1;
7397 }
7398 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7399 if (q) {
962fcbf2 7400 if (!dc_isar_feature(aa32_sha2, s)) {
f1ecb913
AB
7401 return 1;
7402 }
962fcbf2 7403 } else if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
7404 return 1;
7405 }
1a66ac61
RH
7406 ptr1 = vfp_reg_ptr(true, rd);
7407 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7408 if (q) {
1a66ac61 7409 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 7410 } else {
1a66ac61 7411 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 7412 }
1a66ac61
RH
7413 tcg_temp_free_ptr(ptr1);
7414 tcg_temp_free_ptr(ptr2);
f1ecb913 7415 break;
9ee6e8bb
PB
7416 default:
7417 elementwise:
7418 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7419 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7420 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7421 neon_reg_offset(rm, pass));
f764718d 7422 tmp = NULL;
9ee6e8bb 7423 } else {
dd8fbd78 7424 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7425 }
7426 switch (op) {
600b828c 7427 case NEON_2RM_VREV32:
9ee6e8bb 7428 switch (size) {
dd8fbd78
FN
7429 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7430 case 1: gen_swap_half(tmp); break;
600b828c 7431 default: abort();
9ee6e8bb
PB
7432 }
7433 break;
600b828c 7434 case NEON_2RM_VREV16:
dd8fbd78 7435 gen_rev16(tmp);
9ee6e8bb 7436 break;
600b828c 7437 case NEON_2RM_VCLS:
9ee6e8bb 7438 switch (size) {
dd8fbd78
FN
7439 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7440 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7441 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7442 default: abort();
9ee6e8bb
PB
7443 }
7444 break;
600b828c 7445 case NEON_2RM_VCLZ:
9ee6e8bb 7446 switch (size) {
dd8fbd78
FN
7447 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7448 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7449 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7450 default: abort();
9ee6e8bb
PB
7451 }
7452 break;
600b828c 7453 case NEON_2RM_VCNT:
dd8fbd78 7454 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7455 break;
600b828c 7456 case NEON_2RM_VMVN:
dd8fbd78 7457 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7458 break;
600b828c 7459 case NEON_2RM_VQABS:
9ee6e8bb 7460 switch (size) {
02da0b2d
PM
7461 case 0:
7462 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7463 break;
7464 case 1:
7465 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7466 break;
7467 case 2:
7468 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7469 break;
600b828c 7470 default: abort();
9ee6e8bb
PB
7471 }
7472 break;
600b828c 7473 case NEON_2RM_VQNEG:
9ee6e8bb 7474 switch (size) {
02da0b2d
PM
7475 case 0:
7476 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7477 break;
7478 case 1:
7479 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7480 break;
7481 case 2:
7482 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7483 break;
600b828c 7484 default: abort();
9ee6e8bb
PB
7485 }
7486 break;
600b828c 7487 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7488 tmp2 = tcg_const_i32(0);
9ee6e8bb 7489 switch(size) {
dd8fbd78
FN
7490 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7491 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7492 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7493 default: abort();
9ee6e8bb 7494 }
39d5492a 7495 tcg_temp_free_i32(tmp2);
600b828c 7496 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7497 tcg_gen_not_i32(tmp, tmp);
600b828c 7498 }
9ee6e8bb 7499 break;
600b828c 7500 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7501 tmp2 = tcg_const_i32(0);
9ee6e8bb 7502 switch(size) {
dd8fbd78
FN
7503 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7504 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7505 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7506 default: abort();
9ee6e8bb 7507 }
39d5492a 7508 tcg_temp_free_i32(tmp2);
600b828c 7509 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7510 tcg_gen_not_i32(tmp, tmp);
600b828c 7511 }
9ee6e8bb 7512 break;
600b828c 7513 case NEON_2RM_VCEQ0:
dd8fbd78 7514 tmp2 = tcg_const_i32(0);
9ee6e8bb 7515 switch(size) {
dd8fbd78
FN
7516 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7517 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7518 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7519 default: abort();
9ee6e8bb 7520 }
39d5492a 7521 tcg_temp_free_i32(tmp2);
9ee6e8bb 7522 break;
600b828c 7523 case NEON_2RM_VABS:
9ee6e8bb 7524 switch(size) {
dd8fbd78
FN
7525 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7526 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7527 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7528 default: abort();
9ee6e8bb
PB
7529 }
7530 break;
600b828c 7531 case NEON_2RM_VNEG:
dd8fbd78
FN
7532 tmp2 = tcg_const_i32(0);
7533 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7534 tcg_temp_free_i32(tmp2);
9ee6e8bb 7535 break;
600b828c 7536 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7537 {
7538 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7539 tmp2 = tcg_const_i32(0);
aa47cfdd 7540 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7541 tcg_temp_free_i32(tmp2);
aa47cfdd 7542 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7543 break;
aa47cfdd 7544 }
600b828c 7545 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7546 {
7547 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7548 tmp2 = tcg_const_i32(0);
aa47cfdd 7549 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7550 tcg_temp_free_i32(tmp2);
aa47cfdd 7551 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7552 break;
aa47cfdd 7553 }
600b828c 7554 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7555 {
7556 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7557 tmp2 = tcg_const_i32(0);
aa47cfdd 7558 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7559 tcg_temp_free_i32(tmp2);
aa47cfdd 7560 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7561 break;
aa47cfdd 7562 }
600b828c 7563 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7564 {
7565 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7566 tmp2 = tcg_const_i32(0);
aa47cfdd 7567 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7568 tcg_temp_free_i32(tmp2);
aa47cfdd 7569 tcg_temp_free_ptr(fpstatus);
0e326109 7570 break;
aa47cfdd 7571 }
600b828c 7572 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7573 {
7574 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7575 tmp2 = tcg_const_i32(0);
aa47cfdd 7576 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7577 tcg_temp_free_i32(tmp2);
aa47cfdd 7578 tcg_temp_free_ptr(fpstatus);
0e326109 7579 break;
aa47cfdd 7580 }
600b828c 7581 case NEON_2RM_VABS_F:
4373f3ce 7582 gen_vfp_abs(0);
9ee6e8bb 7583 break;
600b828c 7584 case NEON_2RM_VNEG_F:
4373f3ce 7585 gen_vfp_neg(0);
9ee6e8bb 7586 break;
600b828c 7587 case NEON_2RM_VSWP:
dd8fbd78
FN
7588 tmp2 = neon_load_reg(rd, pass);
7589 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7590 break;
600b828c 7591 case NEON_2RM_VTRN:
dd8fbd78 7592 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7593 switch (size) {
dd8fbd78
FN
7594 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7595 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7596 default: abort();
9ee6e8bb 7597 }
dd8fbd78 7598 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7599 break;
34f7b0a2
WN
7600 case NEON_2RM_VRINTN:
7601 case NEON_2RM_VRINTA:
7602 case NEON_2RM_VRINTM:
7603 case NEON_2RM_VRINTP:
7604 case NEON_2RM_VRINTZ:
7605 {
7606 TCGv_i32 tcg_rmode;
7607 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7608 int rmode;
7609
7610 if (op == NEON_2RM_VRINTZ) {
7611 rmode = FPROUNDING_ZERO;
7612 } else {
7613 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7614 }
7615
7616 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7617 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7618 cpu_env);
7619 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7620 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7621 cpu_env);
7622 tcg_temp_free_ptr(fpstatus);
7623 tcg_temp_free_i32(tcg_rmode);
7624 break;
7625 }
2ce70625
WN
7626 case NEON_2RM_VRINTX:
7627 {
7628 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7629 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7630 tcg_temp_free_ptr(fpstatus);
7631 break;
7632 }
901ad525
WN
7633 case NEON_2RM_VCVTAU:
7634 case NEON_2RM_VCVTAS:
7635 case NEON_2RM_VCVTNU:
7636 case NEON_2RM_VCVTNS:
7637 case NEON_2RM_VCVTPU:
7638 case NEON_2RM_VCVTPS:
7639 case NEON_2RM_VCVTMU:
7640 case NEON_2RM_VCVTMS:
7641 {
7642 bool is_signed = !extract32(insn, 7, 1);
7643 TCGv_ptr fpst = get_fpstatus_ptr(1);
7644 TCGv_i32 tcg_rmode, tcg_shift;
7645 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7646
7647 tcg_shift = tcg_const_i32(0);
7648 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7649 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7650 cpu_env);
7651
7652 if (is_signed) {
7653 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7654 tcg_shift, fpst);
7655 } else {
7656 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7657 tcg_shift, fpst);
7658 }
7659
7660 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7661 cpu_env);
7662 tcg_temp_free_i32(tcg_rmode);
7663 tcg_temp_free_i32(tcg_shift);
7664 tcg_temp_free_ptr(fpst);
7665 break;
7666 }
600b828c 7667 case NEON_2RM_VRECPE:
b6d4443a
AB
7668 {
7669 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7670 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7671 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7672 break;
b6d4443a 7673 }
600b828c 7674 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7675 {
7676 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7677 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7678 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7679 break;
c2fb418e 7680 }
600b828c 7681 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7682 {
7683 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7684 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7685 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7686 break;
b6d4443a 7687 }
600b828c 7688 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7689 {
7690 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7691 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7692 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7693 break;
c2fb418e 7694 }
600b828c 7695 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7696 gen_vfp_sito(0, 1);
9ee6e8bb 7697 break;
600b828c 7698 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7699 gen_vfp_uito(0, 1);
9ee6e8bb 7700 break;
600b828c 7701 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7702 gen_vfp_tosiz(0, 1);
9ee6e8bb 7703 break;
600b828c 7704 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7705 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7706 break;
7707 default:
600b828c
PM
7708 /* Reserved op values were caught by the
7709 * neon_2rm_sizes[] check earlier.
7710 */
7711 abort();
9ee6e8bb 7712 }
600b828c 7713 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7714 tcg_gen_st_f32(cpu_F0s, cpu_env,
7715 neon_reg_offset(rd, pass));
9ee6e8bb 7716 } else {
dd8fbd78 7717 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7718 }
7719 }
7720 break;
7721 }
7722 } else if ((insn & (1 << 10)) == 0) {
7723 /* VTBL, VTBX. */
56907d77
PM
7724 int n = ((insn >> 8) & 3) + 1;
7725 if ((rn + n) > 32) {
7726 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7727 * helper function running off the end of the register file.
7728 */
7729 return 1;
7730 }
7731 n <<= 3;
9ee6e8bb 7732 if (insn & (1 << 6)) {
8f8e3aa4 7733 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7734 } else {
7d1b0095 7735 tmp = tcg_temp_new_i32();
8f8e3aa4 7736 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7737 }
8f8e3aa4 7738 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 7739 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 7740 tmp5 = tcg_const_i32(n);
e7c06c4e 7741 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 7742 tcg_temp_free_i32(tmp);
9ee6e8bb 7743 if (insn & (1 << 6)) {
8f8e3aa4 7744 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7745 } else {
7d1b0095 7746 tmp = tcg_temp_new_i32();
8f8e3aa4 7747 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7748 }
8f8e3aa4 7749 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 7750 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 7751 tcg_temp_free_i32(tmp5);
e7c06c4e 7752 tcg_temp_free_ptr(ptr1);
8f8e3aa4 7753 neon_store_reg(rd, 0, tmp2);
3018f259 7754 neon_store_reg(rd, 1, tmp3);
7d1b0095 7755 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7756 } else if ((insn & 0x380) == 0) {
7757 /* VDUP */
133da6aa
JR
7758 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7759 return 1;
7760 }
9ee6e8bb 7761 if (insn & (1 << 19)) {
dd8fbd78 7762 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7763 } else {
dd8fbd78 7764 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7765 }
7766 if (insn & (1 << 16)) {
dd8fbd78 7767 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7768 } else if (insn & (1 << 17)) {
7769 if ((insn >> 18) & 1)
dd8fbd78 7770 gen_neon_dup_high16(tmp);
9ee6e8bb 7771 else
dd8fbd78 7772 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7773 }
7774 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7775 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7776 tcg_gen_mov_i32(tmp2, tmp);
7777 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7778 }
7d1b0095 7779 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7780 } else {
7781 return 1;
7782 }
7783 }
7784 }
7785 return 0;
7786}
7787
8b7209fa
RH
7788/* Advanced SIMD three registers of the same length extension.
7789 * 31 25 23 22 20 16 12 11 10 9 8 3 0
7790 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7791 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7792 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7793 */
7794static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
7795{
26c470a7
RH
7796 gen_helper_gvec_3 *fn_gvec = NULL;
7797 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
7798 int rd, rn, rm, opr_sz;
7799 int data = 0;
8b7209fa
RH
7800 bool q;
7801
7802 q = extract32(insn, 6, 1);
7803 VFP_DREG_D(rd, insn);
7804 VFP_DREG_N(rn, insn);
7805 VFP_DREG_M(rm, insn);
7806 if ((rd | rn | rm) & q) {
7807 return 1;
7808 }
7809
7810 if ((insn & 0xfe200f10) == 0xfc200800) {
7811 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
7812 int size = extract32(insn, 20, 1);
7813 data = extract32(insn, 23, 2); /* rot */
962fcbf2 7814 if (!dc_isar_feature(aa32_vcma, s)
8b7209fa
RH
7815 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7816 return 1;
7817 }
7818 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
7819 } else if ((insn & 0xfea00f10) == 0xfc800800) {
7820 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
7821 int size = extract32(insn, 20, 1);
7822 data = extract32(insn, 24, 1); /* rot */
962fcbf2 7823 if (!dc_isar_feature(aa32_vcma, s)
8b7209fa
RH
7824 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7825 return 1;
7826 }
7827 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
7828 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
7829 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
7830 bool u = extract32(insn, 4, 1);
962fcbf2 7831 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
7832 return 1;
7833 }
7834 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
8b7209fa
RH
7835 } else {
7836 return 1;
7837 }
7838
7839 if (s->fp_excp_el) {
7840 gen_exception_insn(s, 4, EXCP_UDEF,
7841 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7842 return 0;
7843 }
7844 if (!s->vfp_enabled) {
7845 return 1;
7846 }
7847
7848 opr_sz = (1 + q) * 8;
26c470a7
RH
7849 if (fn_gvec_ptr) {
7850 TCGv_ptr fpst = get_fpstatus_ptr(1);
7851 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7852 vfp_reg_offset(1, rn),
7853 vfp_reg_offset(1, rm), fpst,
7854 opr_sz, opr_sz, data, fn_gvec_ptr);
7855 tcg_temp_free_ptr(fpst);
7856 } else {
7857 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
7858 vfp_reg_offset(1, rn),
7859 vfp_reg_offset(1, rm),
7860 opr_sz, opr_sz, data, fn_gvec);
7861 }
8b7209fa
RH
7862 return 0;
7863}
7864
638808ff
RH
7865/* Advanced SIMD two registers and a scalar extension.
7866 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7867 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7868 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7869 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7870 *
7871 */
7872
7873static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7874{
26c470a7
RH
7875 gen_helper_gvec_3 *fn_gvec = NULL;
7876 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
2cc99919 7877 int rd, rn, rm, opr_sz, data;
638808ff
RH
7878 bool q;
7879
7880 q = extract32(insn, 6, 1);
7881 VFP_DREG_D(rd, insn);
7882 VFP_DREG_N(rn, insn);
638808ff
RH
7883 if ((rd | rn) & q) {
7884 return 1;
7885 }
7886
7887 if ((insn & 0xff000f10) == 0xfe000800) {
7888 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
2cc99919
RH
7889 int rot = extract32(insn, 20, 2);
7890 int size = extract32(insn, 23, 1);
7891 int index;
7892
962fcbf2 7893 if (!dc_isar_feature(aa32_vcma, s)) {
638808ff
RH
7894 return 1;
7895 }
2cc99919
RH
7896 if (size == 0) {
7897 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
7898 return 1;
7899 }
7900 /* For fp16, rm is just Vm, and index is M. */
7901 rm = extract32(insn, 0, 4);
7902 index = extract32(insn, 5, 1);
7903 } else {
7904 /* For fp32, rm is the usual M:Vm, and index is 0. */
7905 VFP_DREG_M(rm, insn);
7906 index = 0;
7907 }
7908 data = (index << 2) | rot;
7909 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
7910 : gen_helper_gvec_fcmlah_idx);
26c470a7
RH
7911 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
7912 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
7913 int u = extract32(insn, 4, 1);
962fcbf2 7914 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
7915 return 1;
7916 }
7917 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
7918 /* rm is just Vm, and index is M. */
7919 data = extract32(insn, 5, 1); /* index */
7920 rm = extract32(insn, 0, 4);
638808ff
RH
7921 } else {
7922 return 1;
7923 }
7924
7925 if (s->fp_excp_el) {
7926 gen_exception_insn(s, 4, EXCP_UDEF,
7927 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7928 return 0;
7929 }
7930 if (!s->vfp_enabled) {
7931 return 1;
7932 }
7933
7934 opr_sz = (1 + q) * 8;
26c470a7
RH
7935 if (fn_gvec_ptr) {
7936 TCGv_ptr fpst = get_fpstatus_ptr(1);
7937 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7938 vfp_reg_offset(1, rn),
7939 vfp_reg_offset(1, rm), fpst,
7940 opr_sz, opr_sz, data, fn_gvec_ptr);
7941 tcg_temp_free_ptr(fpst);
7942 } else {
7943 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
7944 vfp_reg_offset(1, rn),
7945 vfp_reg_offset(1, rm),
7946 opr_sz, opr_sz, data, fn_gvec);
7947 }
638808ff
RH
7948 return 0;
7949}
7950
/* Decode a coprocessor register access instruction (MRC/MCR/MRRC/MCRR
 * style encodings, cp14/cp15 and friends).
 *
 * Returns 0 if the instruction was translated, or nonzero for the
 * caller to treat the instruction as UNDEFined.
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* c15_cpar gates guest access to cp0/cp1 on these cores */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the encoding fields; the 64-bit (two-register-transfer)
     * form has no crn/opc2 and carries the second register in rt2.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* The helper may raise an exception, so the CPU state
             * (condexec bits, PC) must be up to date first.
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;

                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value: low word to rt, high word to rt2 */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;

                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();

                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);

                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;

                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
8202
5e3f878a
PB
8203
8204/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 8205static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 8206{
39d5492a 8207 TCGv_i32 tmp;
7d1b0095 8208 tmp = tcg_temp_new_i32();
ecc7b3aa 8209 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 8210 store_reg(s, rlow, tmp);
7d1b0095 8211 tmp = tcg_temp_new_i32();
5e3f878a 8212 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 8213 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
8214 store_reg(s, rhigh, tmp);
8215}
8216
8217/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 8218static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 8219{
a7812ae4 8220 TCGv_i64 tmp;
39d5492a 8221 TCGv_i32 tmp2;
5e3f878a 8222
36aa55dc 8223 /* Load value and extend to 64 bits. */
a7812ae4 8224 tmp = tcg_temp_new_i64();
5e3f878a
PB
8225 tmp2 = load_reg(s, rlow);
8226 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 8227 tcg_temp_free_i32(tmp2);
5e3f878a 8228 tcg_gen_add_i64(val, val, tmp);
b75263d6 8229 tcg_temp_free_i64(tmp);
5e3f878a
PB
8230}
8231
8232/* load and add a 64-bit value from a register pair. */
a7812ae4 8233static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 8234{
a7812ae4 8235 TCGv_i64 tmp;
39d5492a
PM
8236 TCGv_i32 tmpl;
8237 TCGv_i32 tmph;
5e3f878a
PB
8238
8239 /* Load 64-bit value rd:rn. */
36aa55dc
PB
8240 tmpl = load_reg(s, rlow);
8241 tmph = load_reg(s, rhigh);
a7812ae4 8242 tmp = tcg_temp_new_i64();
36aa55dc 8243 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
8244 tcg_temp_free_i32(tmpl);
8245 tcg_temp_free_i32(tmph);
5e3f878a 8246 tcg_gen_add_i64(val, val, tmp);
b75263d6 8247 tcg_temp_free_i64(tmp);
5e3f878a
PB
8248}
8249
/* Set N and Z flags from hi|lo. */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    /* N mirrors the high word (QEMU's NF convention takes N from the
     * sign bit); Z must reflect the whole 64-bit result, so both words
     * are ORed into cpu_ZF.
     */
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
8256
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores. The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually. */

/* Emit code for LDREX{B,H,,D}: load into rt (and rt2 when size == 3,
 * i.e. a 64-bit doubleword access) and record the address and loaded
 * value in cpu_exclusive_addr/cpu_exclusive_val for the matching STREX.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. That means we don't want to do a
         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
         * for an architecturally 64-bit access, but instead do a
         * 64-bit access using MO_BE if appropriate and then split
         * the two halves.
         * This only makes a difference for BE32 user-mode, where
         * frob64() must not flip the two halves of the 64-bit data
         * but this code must treat BE32 user-mode like BE32 system.
         */
        TCGv taddr = gen_aa32_addr(s, addr, opc);

        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
        tcg_temp_free(taddr);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        if (s->be_data == MO_BE) {
            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
        } else {
            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        }
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        /* Remember the (zero-extended) loaded value for the store side */
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
8307
/* Emit code for CLREX: drop the exclusive monitor by marking the
 * recorded exclusive address invalid (-1).
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
8312
/* Emit code for STREX{B,H,,D}: store rt (and rt2 when size == 3) to
 * addr if the exclusive monitor still matches, writing 0 to rd on
 * success and 1 on failure. The monitor is cleared afterwards either way.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Address comparison is done manually; the value comparison happens
     * inside the cmpxchg below.
     */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. Since we're going to treat this as a
         * single 64-bit BE store, we need to put the two halves in the
         * opposite order for BE to LE, so that they end up in the right
         * places.
         * We don't want gen_aa32_frob64() because that does the wrong
         * thing for BE32 usermode.
         */
        if (s->be_data == MO_BE) {
            tcg_gen_concat_i32_i64(n64, t2, t1);
        } else {
            tcg_gen_concat_i32_i64(n64, t1, t2);
        }
        tcg_temp_free_i32(t2);

        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        /* t0 = 0 iff the old memory value matched the remembered value */
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Monitor is always cleared, whether the store succeeded or not */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
426f5abc 8386
/* gen_srs:
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn, which stores
 * LR and SPSR to the banked SP of the specified mode.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    /* Validate the target mode number */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset of the first word stored, relative to the banked SP */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR, then SPSR at addr+4 */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Adjustment from the second store address back to the final
         * value written to the banked SP, per addressing mode.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    s->base.is_jmp = DISAS_UPDATE;
}
8513
c2d9644e
RK
8514/* Generate a label used for skipping this instruction */
8515static void arm_gen_condlabel(DisasContext *s)
8516{
8517 if (!s->condjmp) {
8518 s->condlabel = gen_new_label();
8519 s->condjmp = 1;
8520 }
8521}
8522
8523/* Skip this instruction if the ARM condition is false */
8524static void arm_skip_unless(DisasContext *s, uint32_t cond)
8525{
8526 arm_gen_condlabel(s);
8527 arm_gen_test_cc(cond ^ 1, s->condlabel);
8528}
8529
f4df2210 8530static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8531{
f4df2210 8532 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8533 TCGv_i32 tmp;
8534 TCGv_i32 tmp2;
8535 TCGv_i32 tmp3;
8536 TCGv_i32 addr;
a7812ae4 8537 TCGv_i64 tmp64;
9ee6e8bb 8538
e13886e3
PM
8539 /* M variants do not implement ARM mode; this must raise the INVSTATE
8540 * UsageFault exception.
8541 */
b53d8923 8542 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8543 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8544 default_exception_el(s));
8545 return;
b53d8923 8546 }
9ee6e8bb
PB
8547 cond = insn >> 28;
8548 if (cond == 0xf){
be5e7a76
DES
8549 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8550 * choose to UNDEF. In ARMv5 and above the space is used
8551 * for miscellaneous unconditional instructions.
8552 */
8553 ARCH(5);
8554
9ee6e8bb
PB
8555 /* Unconditional instructions. */
8556 if (((insn >> 25) & 7) == 1) {
8557 /* NEON Data processing. */
d614a513 8558 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8559 goto illegal_op;
d614a513 8560 }
9ee6e8bb 8561
7dcc1f89 8562 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8563 goto illegal_op;
7dcc1f89 8564 }
9ee6e8bb
PB
8565 return;
8566 }
8567 if ((insn & 0x0f100000) == 0x04000000) {
8568 /* NEON load/store. */
d614a513 8569 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8570 goto illegal_op;
d614a513 8571 }
9ee6e8bb 8572
7dcc1f89 8573 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8574 goto illegal_op;
7dcc1f89 8575 }
9ee6e8bb
PB
8576 return;
8577 }
6a57f3eb
WN
8578 if ((insn & 0x0f000e10) == 0x0e000a00) {
8579 /* VFP. */
7dcc1f89 8580 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8581 goto illegal_op;
8582 }
8583 return;
8584 }
3d185e5d
PM
8585 if (((insn & 0x0f30f000) == 0x0510f000) ||
8586 ((insn & 0x0f30f010) == 0x0710f000)) {
8587 if ((insn & (1 << 22)) == 0) {
8588 /* PLDW; v7MP */
d614a513 8589 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8590 goto illegal_op;
8591 }
8592 }
8593 /* Otherwise PLD; v5TE+ */
be5e7a76 8594 ARCH(5TE);
3d185e5d
PM
8595 return;
8596 }
8597 if (((insn & 0x0f70f000) == 0x0450f000) ||
8598 ((insn & 0x0f70f010) == 0x0650f000)) {
8599 ARCH(7);
8600 return; /* PLI; V7 */
8601 }
8602 if (((insn & 0x0f700000) == 0x04100000) ||
8603 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8604 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8605 goto illegal_op;
8606 }
8607 return; /* v7MP: Unallocated memory hint: must NOP */
8608 }
8609
8610 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8611 ARCH(6);
8612 /* setend */
9886ecdf
PB
8613 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8614 gen_helper_setend(cpu_env);
dcba3a8d 8615 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8616 }
8617 return;
8618 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8619 switch ((insn >> 4) & 0xf) {
8620 case 1: /* clrex */
8621 ARCH(6K);
426f5abc 8622 gen_clrex(s);
9ee6e8bb
PB
8623 return;
8624 case 4: /* dsb */
8625 case 5: /* dmb */
9ee6e8bb 8626 ARCH(7);
61e4c432 8627 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8628 return;
6df99dec
SS
8629 case 6: /* isb */
8630 /* We need to break the TB after this insn to execute
8631 * self-modifying code correctly and also to take
8632 * any pending interrupts immediately.
8633 */
0b609cc1 8634 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 8635 return;
9ee6e8bb
PB
8636 default:
8637 goto illegal_op;
8638 }
8639 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8640 /* srs */
81465888
PM
8641 ARCH(6);
8642 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8643 return;
ea825eee 8644 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8645 /* rfe */
c67b6b71 8646 int32_t offset;
9ee6e8bb
PB
8647 if (IS_USER(s))
8648 goto illegal_op;
8649 ARCH(6);
8650 rn = (insn >> 16) & 0xf;
b0109805 8651 addr = load_reg(s, rn);
9ee6e8bb
PB
8652 i = (insn >> 23) & 3;
8653 switch (i) {
b0109805 8654 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8655 case 1: offset = 0; break; /* IA */
8656 case 2: offset = -8; break; /* DB */
b0109805 8657 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8658 default: abort();
8659 }
8660 if (offset)
b0109805
PB
8661 tcg_gen_addi_i32(addr, addr, offset);
8662 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8663 tmp = tcg_temp_new_i32();
12dcc321 8664 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8665 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8666 tmp2 = tcg_temp_new_i32();
12dcc321 8667 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8668 if (insn & (1 << 21)) {
8669 /* Base writeback. */
8670 switch (i) {
b0109805 8671 case 0: offset = -8; break;
c67b6b71
FN
8672 case 1: offset = 4; break;
8673 case 2: offset = -4; break;
b0109805 8674 case 3: offset = 0; break;
9ee6e8bb
PB
8675 default: abort();
8676 }
8677 if (offset)
b0109805
PB
8678 tcg_gen_addi_i32(addr, addr, offset);
8679 store_reg(s, rn, addr);
8680 } else {
7d1b0095 8681 tcg_temp_free_i32(addr);
9ee6e8bb 8682 }
b0109805 8683 gen_rfe(s, tmp, tmp2);
c67b6b71 8684 return;
9ee6e8bb
PB
8685 } else if ((insn & 0x0e000000) == 0x0a000000) {
8686 /* branch link and change to thumb (blx <offset>) */
8687 int32_t offset;
8688
8689 val = (uint32_t)s->pc;
7d1b0095 8690 tmp = tcg_temp_new_i32();
d9ba4830
PB
8691 tcg_gen_movi_i32(tmp, val);
8692 store_reg(s, 14, tmp);
9ee6e8bb
PB
8693 /* Sign-extend the 24-bit offset */
8694 offset = (((int32_t)insn) << 8) >> 8;
8695 /* offset * 4 + bit24 * 2 + (thumb bit) */
8696 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8697 /* pipeline offset */
8698 val += 4;
be5e7a76 8699 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8700 gen_bx_im(s, val);
9ee6e8bb
PB
8701 return;
8702 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8703 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8704 /* iWMMXt register transfer. */
c0f4af17 8705 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8706 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8707 return;
c0f4af17
PM
8708 }
8709 }
9ee6e8bb 8710 }
8b7209fa
RH
8711 } else if ((insn & 0x0e000a00) == 0x0c000800
8712 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8713 if (disas_neon_insn_3same_ext(s, insn)) {
8714 goto illegal_op;
8715 }
8716 return;
638808ff
RH
8717 } else if ((insn & 0x0f000a00) == 0x0e000800
8718 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8719 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
8720 goto illegal_op;
8721 }
8722 return;
9ee6e8bb
PB
8723 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8724 /* Coprocessor double register transfer. */
be5e7a76 8725 ARCH(5TE);
9ee6e8bb
PB
8726 } else if ((insn & 0x0f000010) == 0x0e000010) {
8727 /* Additional coprocessor register transfer. */
7997d92f 8728 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8729 uint32_t mask;
8730 uint32_t val;
8731 /* cps (privileged) */
8732 if (IS_USER(s))
8733 return;
8734 mask = val = 0;
8735 if (insn & (1 << 19)) {
8736 if (insn & (1 << 8))
8737 mask |= CPSR_A;
8738 if (insn & (1 << 7))
8739 mask |= CPSR_I;
8740 if (insn & (1 << 6))
8741 mask |= CPSR_F;
8742 if (insn & (1 << 18))
8743 val |= mask;
8744 }
7997d92f 8745 if (insn & (1 << 17)) {
9ee6e8bb
PB
8746 mask |= CPSR_M;
8747 val |= (insn & 0x1f);
8748 }
8749 if (mask) {
2fbac54b 8750 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8751 }
8752 return;
8753 }
8754 goto illegal_op;
8755 }
8756 if (cond != 0xe) {
8757 /* if not always execute, we generate a conditional jump to
8758 next instruction */
c2d9644e 8759 arm_skip_unless(s, cond);
9ee6e8bb
PB
8760 }
8761 if ((insn & 0x0f900000) == 0x03000000) {
8762 if ((insn & (1 << 21)) == 0) {
8763 ARCH(6T2);
8764 rd = (insn >> 12) & 0xf;
8765 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8766 if ((insn & (1 << 22)) == 0) {
8767 /* MOVW */
7d1b0095 8768 tmp = tcg_temp_new_i32();
5e3f878a 8769 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8770 } else {
8771 /* MOVT */
5e3f878a 8772 tmp = load_reg(s, rd);
86831435 8773 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8774 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8775 }
5e3f878a 8776 store_reg(s, rd, tmp);
9ee6e8bb
PB
8777 } else {
8778 if (((insn >> 12) & 0xf) != 0xf)
8779 goto illegal_op;
8780 if (((insn >> 16) & 0xf) == 0) {
8781 gen_nop_hint(s, insn & 0xff);
8782 } else {
8783 /* CPSR = immediate */
8784 val = insn & 0xff;
8785 shift = ((insn >> 8) & 0xf) * 2;
8786 if (shift)
8787 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8788 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8789 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8790 i, val)) {
9ee6e8bb 8791 goto illegal_op;
7dcc1f89 8792 }
9ee6e8bb
PB
8793 }
8794 }
8795 } else if ((insn & 0x0f900000) == 0x01000000
8796 && (insn & 0x00000090) != 0x00000090) {
8797 /* miscellaneous instructions */
8798 op1 = (insn >> 21) & 3;
8799 sh = (insn >> 4) & 0xf;
8800 rm = insn & 0xf;
8801 switch (sh) {
8bfd0550
PM
8802 case 0x0: /* MSR, MRS */
8803 if (insn & (1 << 9)) {
8804 /* MSR (banked) and MRS (banked) */
8805 int sysm = extract32(insn, 16, 4) |
8806 (extract32(insn, 8, 1) << 4);
8807 int r = extract32(insn, 22, 1);
8808
8809 if (op1 & 1) {
8810 /* MSR (banked) */
8811 gen_msr_banked(s, r, sysm, rm);
8812 } else {
8813 /* MRS (banked) */
8814 int rd = extract32(insn, 12, 4);
8815
8816 gen_mrs_banked(s, r, sysm, rd);
8817 }
8818 break;
8819 }
8820
8821 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8822 if (op1 & 1) {
8823 /* PSR = reg */
2fbac54b 8824 tmp = load_reg(s, rm);
9ee6e8bb 8825 i = ((op1 & 2) != 0);
7dcc1f89 8826 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8827 goto illegal_op;
8828 } else {
8829 /* reg = PSR */
8830 rd = (insn >> 12) & 0xf;
8831 if (op1 & 2) {
8832 if (IS_USER(s))
8833 goto illegal_op;
d9ba4830 8834 tmp = load_cpu_field(spsr);
9ee6e8bb 8835 } else {
7d1b0095 8836 tmp = tcg_temp_new_i32();
9ef39277 8837 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8838 }
d9ba4830 8839 store_reg(s, rd, tmp);
9ee6e8bb
PB
8840 }
8841 break;
8842 case 0x1:
8843 if (op1 == 1) {
8844 /* branch/exchange thumb (bx). */
be5e7a76 8845 ARCH(4T);
d9ba4830
PB
8846 tmp = load_reg(s, rm);
8847 gen_bx(s, tmp);
9ee6e8bb
PB
8848 } else if (op1 == 3) {
8849 /* clz */
be5e7a76 8850 ARCH(5);
9ee6e8bb 8851 rd = (insn >> 12) & 0xf;
1497c961 8852 tmp = load_reg(s, rm);
7539a012 8853 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8854 store_reg(s, rd, tmp);
9ee6e8bb
PB
8855 } else {
8856 goto illegal_op;
8857 }
8858 break;
8859 case 0x2:
8860 if (op1 == 1) {
8861 ARCH(5J); /* bxj */
8862 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8863 tmp = load_reg(s, rm);
8864 gen_bx(s, tmp);
9ee6e8bb
PB
8865 } else {
8866 goto illegal_op;
8867 }
8868 break;
8869 case 0x3:
8870 if (op1 != 1)
8871 goto illegal_op;
8872
be5e7a76 8873 ARCH(5);
9ee6e8bb 8874 /* branch link/exchange thumb (blx) */
d9ba4830 8875 tmp = load_reg(s, rm);
7d1b0095 8876 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8877 tcg_gen_movi_i32(tmp2, s->pc);
8878 store_reg(s, 14, tmp2);
8879 gen_bx(s, tmp);
9ee6e8bb 8880 break;
eb0ecd5a
WN
8881 case 0x4:
8882 {
8883 /* crc32/crc32c */
8884 uint32_t c = extract32(insn, 8, 4);
8885
8886 /* Check this CPU supports ARMv8 CRC instructions.
8887 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8888 * Bits 8, 10 and 11 should be zero.
8889 */
962fcbf2 8890 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
eb0ecd5a
WN
8891 goto illegal_op;
8892 }
8893
8894 rn = extract32(insn, 16, 4);
8895 rd = extract32(insn, 12, 4);
8896
8897 tmp = load_reg(s, rn);
8898 tmp2 = load_reg(s, rm);
aa633469
PM
8899 if (op1 == 0) {
8900 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8901 } else if (op1 == 1) {
8902 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8903 }
eb0ecd5a
WN
8904 tmp3 = tcg_const_i32(1 << op1);
8905 if (c & 0x2) {
8906 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8907 } else {
8908 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8909 }
8910 tcg_temp_free_i32(tmp2);
8911 tcg_temp_free_i32(tmp3);
8912 store_reg(s, rd, tmp);
8913 break;
8914 }
9ee6e8bb 8915 case 0x5: /* saturating add/subtract */
be5e7a76 8916 ARCH(5TE);
9ee6e8bb
PB
8917 rd = (insn >> 12) & 0xf;
8918 rn = (insn >> 16) & 0xf;
b40d0353 8919 tmp = load_reg(s, rm);
5e3f878a 8920 tmp2 = load_reg(s, rn);
9ee6e8bb 8921 if (op1 & 2)
9ef39277 8922 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8923 if (op1 & 1)
9ef39277 8924 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8925 else
9ef39277 8926 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8927 tcg_temp_free_i32(tmp2);
5e3f878a 8928 store_reg(s, rd, tmp);
9ee6e8bb 8929 break;
55c544ed
PM
8930 case 0x6: /* ERET */
8931 if (op1 != 3) {
8932 goto illegal_op;
8933 }
8934 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
8935 goto illegal_op;
8936 }
8937 if ((insn & 0x000fff0f) != 0x0000000e) {
8938 /* UNPREDICTABLE; we choose to UNDEF */
8939 goto illegal_op;
8940 }
8941
8942 if (s->current_el == 2) {
8943 tmp = load_cpu_field(elr_el[2]);
8944 } else {
8945 tmp = load_reg(s, 14);
8946 }
8947 gen_exception_return(s, tmp);
8948 break;
49e14940 8949 case 7:
d4a2dc67
PM
8950 {
8951 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8952 switch (op1) {
19a6e31c
PM
8953 case 0:
8954 /* HLT */
8955 gen_hlt(s, imm16);
8956 break;
37e6456e
PM
8957 case 1:
8958 /* bkpt */
8959 ARCH(5);
c900a2e6 8960 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
37e6456e
PM
8961 break;
8962 case 2:
8963 /* Hypervisor call (v7) */
8964 ARCH(7);
8965 if (IS_USER(s)) {
8966 goto illegal_op;
8967 }
8968 gen_hvc(s, imm16);
8969 break;
8970 case 3:
8971 /* Secure monitor call (v6+) */
8972 ARCH(6K);
8973 if (IS_USER(s)) {
8974 goto illegal_op;
8975 }
8976 gen_smc(s);
8977 break;
8978 default:
19a6e31c 8979 g_assert_not_reached();
49e14940 8980 }
9ee6e8bb 8981 break;
d4a2dc67 8982 }
9ee6e8bb
PB
8983 case 0x8: /* signed multiply */
8984 case 0xa:
8985 case 0xc:
8986 case 0xe:
be5e7a76 8987 ARCH(5TE);
9ee6e8bb
PB
8988 rs = (insn >> 8) & 0xf;
8989 rn = (insn >> 12) & 0xf;
8990 rd = (insn >> 16) & 0xf;
8991 if (op1 == 1) {
8992 /* (32 * 16) >> 16 */
5e3f878a
PB
8993 tmp = load_reg(s, rm);
8994 tmp2 = load_reg(s, rs);
9ee6e8bb 8995 if (sh & 4)
5e3f878a 8996 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8997 else
5e3f878a 8998 gen_sxth(tmp2);
a7812ae4
PB
8999 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9000 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9001 tmp = tcg_temp_new_i32();
ecc7b3aa 9002 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 9003 tcg_temp_free_i64(tmp64);
9ee6e8bb 9004 if ((sh & 2) == 0) {
5e3f878a 9005 tmp2 = load_reg(s, rn);
9ef39277 9006 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9007 tcg_temp_free_i32(tmp2);
9ee6e8bb 9008 }
5e3f878a 9009 store_reg(s, rd, tmp);
9ee6e8bb
PB
9010 } else {
9011 /* 16 * 16 */
5e3f878a
PB
9012 tmp = load_reg(s, rm);
9013 tmp2 = load_reg(s, rs);
9014 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 9015 tcg_temp_free_i32(tmp2);
9ee6e8bb 9016 if (op1 == 2) {
a7812ae4
PB
9017 tmp64 = tcg_temp_new_i64();
9018 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9019 tcg_temp_free_i32(tmp);
a7812ae4
PB
9020 gen_addq(s, tmp64, rn, rd);
9021 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 9022 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9023 } else {
9024 if (op1 == 0) {
5e3f878a 9025 tmp2 = load_reg(s, rn);
9ef39277 9026 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9027 tcg_temp_free_i32(tmp2);
9ee6e8bb 9028 }
5e3f878a 9029 store_reg(s, rd, tmp);
9ee6e8bb
PB
9030 }
9031 }
9032 break;
9033 default:
9034 goto illegal_op;
9035 }
9036 } else if (((insn & 0x0e000000) == 0 &&
9037 (insn & 0x00000090) != 0x90) ||
9038 ((insn & 0x0e000000) == (1 << 25))) {
9039 int set_cc, logic_cc, shiftop;
9040
9041 op1 = (insn >> 21) & 0xf;
9042 set_cc = (insn >> 20) & 1;
9043 logic_cc = table_logic_cc[op1] & set_cc;
9044
9045 /* data processing instruction */
9046 if (insn & (1 << 25)) {
9047 /* immediate operand */
9048 val = insn & 0xff;
9049 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 9050 if (shift) {
9ee6e8bb 9051 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 9052 }
7d1b0095 9053 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
9054 tcg_gen_movi_i32(tmp2, val);
9055 if (logic_cc && shift) {
9056 gen_set_CF_bit31(tmp2);
9057 }
9ee6e8bb
PB
9058 } else {
9059 /* register */
9060 rm = (insn) & 0xf;
e9bb4aa9 9061 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9062 shiftop = (insn >> 5) & 3;
9063 if (!(insn & (1 << 4))) {
9064 shift = (insn >> 7) & 0x1f;
e9bb4aa9 9065 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
9066 } else {
9067 rs = (insn >> 8) & 0xf;
8984bd2e 9068 tmp = load_reg(s, rs);
e9bb4aa9 9069 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
9070 }
9071 }
9072 if (op1 != 0x0f && op1 != 0x0d) {
9073 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
9074 tmp = load_reg(s, rn);
9075 } else {
f764718d 9076 tmp = NULL;
9ee6e8bb
PB
9077 }
9078 rd = (insn >> 12) & 0xf;
9079 switch(op1) {
9080 case 0x00:
e9bb4aa9
JR
9081 tcg_gen_and_i32(tmp, tmp, tmp2);
9082 if (logic_cc) {
9083 gen_logic_CC(tmp);
9084 }
7dcc1f89 9085 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9086 break;
9087 case 0x01:
e9bb4aa9
JR
9088 tcg_gen_xor_i32(tmp, tmp, tmp2);
9089 if (logic_cc) {
9090 gen_logic_CC(tmp);
9091 }
7dcc1f89 9092 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9093 break;
9094 case 0x02:
9095 if (set_cc && rd == 15) {
9096 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 9097 if (IS_USER(s)) {
9ee6e8bb 9098 goto illegal_op;
e9bb4aa9 9099 }
72485ec4 9100 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 9101 gen_exception_return(s, tmp);
9ee6e8bb 9102 } else {
e9bb4aa9 9103 if (set_cc) {
72485ec4 9104 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9105 } else {
9106 tcg_gen_sub_i32(tmp, tmp, tmp2);
9107 }
7dcc1f89 9108 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9109 }
9110 break;
9111 case 0x03:
e9bb4aa9 9112 if (set_cc) {
72485ec4 9113 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9114 } else {
9115 tcg_gen_sub_i32(tmp, tmp2, tmp);
9116 }
7dcc1f89 9117 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9118 break;
9119 case 0x04:
e9bb4aa9 9120 if (set_cc) {
72485ec4 9121 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9122 } else {
9123 tcg_gen_add_i32(tmp, tmp, tmp2);
9124 }
7dcc1f89 9125 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9126 break;
9127 case 0x05:
e9bb4aa9 9128 if (set_cc) {
49b4c31e 9129 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9130 } else {
9131 gen_add_carry(tmp, tmp, tmp2);
9132 }
7dcc1f89 9133 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9134 break;
9135 case 0x06:
e9bb4aa9 9136 if (set_cc) {
2de68a49 9137 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9138 } else {
9139 gen_sub_carry(tmp, tmp, tmp2);
9140 }
7dcc1f89 9141 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9142 break;
9143 case 0x07:
e9bb4aa9 9144 if (set_cc) {
2de68a49 9145 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9146 } else {
9147 gen_sub_carry(tmp, tmp2, tmp);
9148 }
7dcc1f89 9149 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9150 break;
9151 case 0x08:
9152 if (set_cc) {
e9bb4aa9
JR
9153 tcg_gen_and_i32(tmp, tmp, tmp2);
9154 gen_logic_CC(tmp);
9ee6e8bb 9155 }
7d1b0095 9156 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9157 break;
9158 case 0x09:
9159 if (set_cc) {
e9bb4aa9
JR
9160 tcg_gen_xor_i32(tmp, tmp, tmp2);
9161 gen_logic_CC(tmp);
9ee6e8bb 9162 }
7d1b0095 9163 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9164 break;
9165 case 0x0a:
9166 if (set_cc) {
72485ec4 9167 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 9168 }
7d1b0095 9169 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9170 break;
9171 case 0x0b:
9172 if (set_cc) {
72485ec4 9173 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9174 }
7d1b0095 9175 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9176 break;
9177 case 0x0c:
e9bb4aa9
JR
9178 tcg_gen_or_i32(tmp, tmp, tmp2);
9179 if (logic_cc) {
9180 gen_logic_CC(tmp);
9181 }
7dcc1f89 9182 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9183 break;
9184 case 0x0d:
9185 if (logic_cc && rd == 15) {
9186 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 9187 if (IS_USER(s)) {
9ee6e8bb 9188 goto illegal_op;
e9bb4aa9
JR
9189 }
9190 gen_exception_return(s, tmp2);
9ee6e8bb 9191 } else {
e9bb4aa9
JR
9192 if (logic_cc) {
9193 gen_logic_CC(tmp2);
9194 }
7dcc1f89 9195 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9196 }
9197 break;
9198 case 0x0e:
f669df27 9199 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
9200 if (logic_cc) {
9201 gen_logic_CC(tmp);
9202 }
7dcc1f89 9203 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9204 break;
9205 default:
9206 case 0x0f:
e9bb4aa9
JR
9207 tcg_gen_not_i32(tmp2, tmp2);
9208 if (logic_cc) {
9209 gen_logic_CC(tmp2);
9210 }
7dcc1f89 9211 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9212 break;
9213 }
e9bb4aa9 9214 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 9215 tcg_temp_free_i32(tmp2);
e9bb4aa9 9216 }
9ee6e8bb
PB
9217 } else {
9218 /* other instructions */
9219 op1 = (insn >> 24) & 0xf;
9220 switch(op1) {
9221 case 0x0:
9222 case 0x1:
9223 /* multiplies, extra load/stores */
9224 sh = (insn >> 5) & 3;
9225 if (sh == 0) {
9226 if (op1 == 0x0) {
9227 rd = (insn >> 16) & 0xf;
9228 rn = (insn >> 12) & 0xf;
9229 rs = (insn >> 8) & 0xf;
9230 rm = (insn) & 0xf;
9231 op1 = (insn >> 20) & 0xf;
9232 switch (op1) {
9233 case 0: case 1: case 2: case 3: case 6:
9234 /* 32 bit mul */
5e3f878a
PB
9235 tmp = load_reg(s, rs);
9236 tmp2 = load_reg(s, rm);
9237 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9238 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9239 if (insn & (1 << 22)) {
9240 /* Subtract (mls) */
9241 ARCH(6T2);
5e3f878a
PB
9242 tmp2 = load_reg(s, rn);
9243 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 9244 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9245 } else if (insn & (1 << 21)) {
9246 /* Add */
5e3f878a
PB
9247 tmp2 = load_reg(s, rn);
9248 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9249 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9250 }
9251 if (insn & (1 << 20))
5e3f878a
PB
9252 gen_logic_CC(tmp);
9253 store_reg(s, rd, tmp);
9ee6e8bb 9254 break;
8aac08b1
AJ
9255 case 4:
9256 /* 64 bit mul double accumulate (UMAAL) */
9257 ARCH(6);
9258 tmp = load_reg(s, rs);
9259 tmp2 = load_reg(s, rm);
9260 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9261 gen_addq_lo(s, tmp64, rn);
9262 gen_addq_lo(s, tmp64, rd);
9263 gen_storeq_reg(s, rn, rd, tmp64);
9264 tcg_temp_free_i64(tmp64);
9265 break;
9266 case 8: case 9: case 10: case 11:
9267 case 12: case 13: case 14: case 15:
9268 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
9269 tmp = load_reg(s, rs);
9270 tmp2 = load_reg(s, rm);
8aac08b1 9271 if (insn & (1 << 22)) {
c9f10124 9272 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 9273 } else {
c9f10124 9274 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
9275 }
9276 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
9277 TCGv_i32 al = load_reg(s, rn);
9278 TCGv_i32 ah = load_reg(s, rd);
c9f10124 9279 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
9280 tcg_temp_free_i32(al);
9281 tcg_temp_free_i32(ah);
9ee6e8bb 9282 }
8aac08b1 9283 if (insn & (1 << 20)) {
c9f10124 9284 gen_logicq_cc(tmp, tmp2);
8aac08b1 9285 }
c9f10124
RH
9286 store_reg(s, rn, tmp);
9287 store_reg(s, rd, tmp2);
9ee6e8bb 9288 break;
8aac08b1
AJ
9289 default:
9290 goto illegal_op;
9ee6e8bb
PB
9291 }
9292 } else {
9293 rn = (insn >> 16) & 0xf;
9294 rd = (insn >> 12) & 0xf;
9295 if (insn & (1 << 23)) {
9296 /* load/store exclusive */
2359bf80 9297 int op2 = (insn >> 8) & 3;
86753403 9298 op1 = (insn >> 21) & 0x3;
2359bf80
MR
9299
9300 switch (op2) {
9301 case 0: /* lda/stl */
9302 if (op1 == 1) {
9303 goto illegal_op;
9304 }
9305 ARCH(8);
9306 break;
9307 case 1: /* reserved */
9308 goto illegal_op;
9309 case 2: /* ldaex/stlex */
9310 ARCH(8);
9311 break;
9312 case 3: /* ldrex/strex */
9313 if (op1) {
9314 ARCH(6K);
9315 } else {
9316 ARCH(6);
9317 }
9318 break;
9319 }
9320
3174f8e9 9321 addr = tcg_temp_local_new_i32();
98a46317 9322 load_reg_var(s, addr, rn);
2359bf80
MR
9323
9324 /* Since the emulation does not have barriers,
9325 the acquire/release semantics need no special
9326 handling */
9327 if (op2 == 0) {
9328 if (insn & (1 << 20)) {
9329 tmp = tcg_temp_new_i32();
9330 switch (op1) {
9331 case 0: /* lda */
9bb6558a
PM
9332 gen_aa32_ld32u_iss(s, tmp, addr,
9333 get_mem_index(s),
9334 rd | ISSIsAcqRel);
2359bf80
MR
9335 break;
9336 case 2: /* ldab */
9bb6558a
PM
9337 gen_aa32_ld8u_iss(s, tmp, addr,
9338 get_mem_index(s),
9339 rd | ISSIsAcqRel);
2359bf80
MR
9340 break;
9341 case 3: /* ldah */
9bb6558a
PM
9342 gen_aa32_ld16u_iss(s, tmp, addr,
9343 get_mem_index(s),
9344 rd | ISSIsAcqRel);
2359bf80
MR
9345 break;
9346 default:
9347 abort();
9348 }
9349 store_reg(s, rd, tmp);
9350 } else {
9351 rm = insn & 0xf;
9352 tmp = load_reg(s, rm);
9353 switch (op1) {
9354 case 0: /* stl */
9bb6558a
PM
9355 gen_aa32_st32_iss(s, tmp, addr,
9356 get_mem_index(s),
9357 rm | ISSIsAcqRel);
2359bf80
MR
9358 break;
9359 case 2: /* stlb */
9bb6558a
PM
9360 gen_aa32_st8_iss(s, tmp, addr,
9361 get_mem_index(s),
9362 rm | ISSIsAcqRel);
2359bf80
MR
9363 break;
9364 case 3: /* stlh */
9bb6558a
PM
9365 gen_aa32_st16_iss(s, tmp, addr,
9366 get_mem_index(s),
9367 rm | ISSIsAcqRel);
2359bf80
MR
9368 break;
9369 default:
9370 abort();
9371 }
9372 tcg_temp_free_i32(tmp);
9373 }
9374 } else if (insn & (1 << 20)) {
86753403
PB
9375 switch (op1) {
9376 case 0: /* ldrex */
426f5abc 9377 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
9378 break;
9379 case 1: /* ldrexd */
426f5abc 9380 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
9381 break;
9382 case 2: /* ldrexb */
426f5abc 9383 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
9384 break;
9385 case 3: /* ldrexh */
426f5abc 9386 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
9387 break;
9388 default:
9389 abort();
9390 }
9ee6e8bb
PB
9391 } else {
9392 rm = insn & 0xf;
86753403
PB
9393 switch (op1) {
9394 case 0: /* strex */
426f5abc 9395 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
9396 break;
9397 case 1: /* strexd */
502e64fe 9398 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
9399 break;
9400 case 2: /* strexb */
426f5abc 9401 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
9402 break;
9403 case 3: /* strexh */
426f5abc 9404 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
9405 break;
9406 default:
9407 abort();
9408 }
9ee6e8bb 9409 }
39d5492a 9410 tcg_temp_free_i32(addr);
c4869ca6
OS
9411 } else if ((insn & 0x00300f00) == 0) {
9412 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
9413 * - SWP, SWPB
9414 */
9415
cf12bce0
EC
9416 TCGv taddr;
9417 TCGMemOp opc = s->be_data;
9418
9ee6e8bb
PB
9419 rm = (insn) & 0xf;
9420
9ee6e8bb 9421 if (insn & (1 << 22)) {
cf12bce0 9422 opc |= MO_UB;
9ee6e8bb 9423 } else {
cf12bce0 9424 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 9425 }
cf12bce0
EC
9426
9427 addr = load_reg(s, rn);
9428 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 9429 tcg_temp_free_i32(addr);
cf12bce0
EC
9430
9431 tmp = load_reg(s, rm);
9432 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9433 get_mem_index(s), opc);
9434 tcg_temp_free(taddr);
9435 store_reg(s, rd, tmp);
c4869ca6
OS
9436 } else {
9437 goto illegal_op;
9ee6e8bb
PB
9438 }
9439 }
9440 } else {
9441 int address_offset;
3960c336 9442 bool load = insn & (1 << 20);
63f26fcf
PM
9443 bool wbit = insn & (1 << 21);
9444 bool pbit = insn & (1 << 24);
3960c336 9445 bool doubleword = false;
9bb6558a
PM
9446 ISSInfo issinfo;
9447
9ee6e8bb
PB
9448 /* Misc load/store */
9449 rn = (insn >> 16) & 0xf;
9450 rd = (insn >> 12) & 0xf;
3960c336 9451
9bb6558a
PM
9452 /* ISS not valid if writeback */
9453 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9454
3960c336
PM
9455 if (!load && (sh & 2)) {
9456 /* doubleword */
9457 ARCH(5TE);
9458 if (rd & 1) {
9459 /* UNPREDICTABLE; we choose to UNDEF */
9460 goto illegal_op;
9461 }
9462 load = (sh & 1) == 0;
9463 doubleword = true;
9464 }
9465
b0109805 9466 addr = load_reg(s, rn);
63f26fcf 9467 if (pbit) {
b0109805 9468 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 9469 }
9ee6e8bb 9470 address_offset = 0;
3960c336
PM
9471
9472 if (doubleword) {
9473 if (!load) {
9ee6e8bb 9474 /* store */
b0109805 9475 tmp = load_reg(s, rd);
12dcc321 9476 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9477 tcg_temp_free_i32(tmp);
b0109805
PB
9478 tcg_gen_addi_i32(addr, addr, 4);
9479 tmp = load_reg(s, rd + 1);
12dcc321 9480 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9481 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9482 } else {
9483 /* load */
5a839c0d 9484 tmp = tcg_temp_new_i32();
12dcc321 9485 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9486 store_reg(s, rd, tmp);
9487 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9488 tmp = tcg_temp_new_i32();
12dcc321 9489 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9490 rd++;
9ee6e8bb
PB
9491 }
9492 address_offset = -4;
3960c336
PM
9493 } else if (load) {
9494 /* load */
9495 tmp = tcg_temp_new_i32();
9496 switch (sh) {
9497 case 1:
9bb6558a
PM
9498 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9499 issinfo);
3960c336
PM
9500 break;
9501 case 2:
9bb6558a
PM
9502 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9503 issinfo);
3960c336
PM
9504 break;
9505 default:
9506 case 3:
9bb6558a
PM
9507 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9508 issinfo);
3960c336
PM
9509 break;
9510 }
9ee6e8bb
PB
9511 } else {
9512 /* store */
b0109805 9513 tmp = load_reg(s, rd);
9bb6558a 9514 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9515 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9516 }
9517 /* Perform base writeback before the loaded value to
9518 ensure correct behavior with overlapping index registers.
b6af0975 9519 ldrd with base writeback is undefined if the
9ee6e8bb 9520 destination and index registers overlap. */
63f26fcf 9521 if (!pbit) {
b0109805
PB
9522 gen_add_datah_offset(s, insn, address_offset, addr);
9523 store_reg(s, rn, addr);
63f26fcf 9524 } else if (wbit) {
9ee6e8bb 9525 if (address_offset)
b0109805
PB
9526 tcg_gen_addi_i32(addr, addr, address_offset);
9527 store_reg(s, rn, addr);
9528 } else {
7d1b0095 9529 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9530 }
9531 if (load) {
9532 /* Complete the load. */
b0109805 9533 store_reg(s, rd, tmp);
9ee6e8bb
PB
9534 }
9535 }
9536 break;
9537 case 0x4:
9538 case 0x5:
9539 goto do_ldst;
9540 case 0x6:
9541 case 0x7:
9542 if (insn & (1 << 4)) {
9543 ARCH(6);
9544 /* Armv6 Media instructions. */
9545 rm = insn & 0xf;
9546 rn = (insn >> 16) & 0xf;
2c0262af 9547 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9548 rs = (insn >> 8) & 0xf;
9549 switch ((insn >> 23) & 3) {
9550 case 0: /* Parallel add/subtract. */
9551 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9552 tmp = load_reg(s, rn);
9553 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9554 sh = (insn >> 5) & 7;
9555 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9556 goto illegal_op;
6ddbc6e4 9557 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 9558 tcg_temp_free_i32(tmp2);
6ddbc6e4 9559 store_reg(s, rd, tmp);
9ee6e8bb
PB
9560 break;
9561 case 1:
9562 if ((insn & 0x00700020) == 0) {
6c95676b 9563 /* Halfword pack. */
3670669c
PB
9564 tmp = load_reg(s, rn);
9565 tmp2 = load_reg(s, rm);
9ee6e8bb 9566 shift = (insn >> 7) & 0x1f;
3670669c
PB
9567 if (insn & (1 << 6)) {
9568 /* pkhtb */
22478e79
AZ
9569 if (shift == 0)
9570 shift = 31;
9571 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 9572 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 9573 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
9574 } else {
9575 /* pkhbt */
22478e79
AZ
9576 if (shift)
9577 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 9578 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
9579 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9580 }
9581 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9582 tcg_temp_free_i32(tmp2);
3670669c 9583 store_reg(s, rd, tmp);
9ee6e8bb
PB
9584 } else if ((insn & 0x00200020) == 0x00200000) {
9585 /* [us]sat */
6ddbc6e4 9586 tmp = load_reg(s, rm);
9ee6e8bb
PB
9587 shift = (insn >> 7) & 0x1f;
9588 if (insn & (1 << 6)) {
9589 if (shift == 0)
9590 shift = 31;
6ddbc6e4 9591 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9592 } else {
6ddbc6e4 9593 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9594 }
9595 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9596 tmp2 = tcg_const_i32(sh);
9597 if (insn & (1 << 22))
9ef39277 9598 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9599 else
9ef39277 9600 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9601 tcg_temp_free_i32(tmp2);
6ddbc6e4 9602 store_reg(s, rd, tmp);
9ee6e8bb
PB
9603 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9604 /* [us]sat16 */
6ddbc6e4 9605 tmp = load_reg(s, rm);
9ee6e8bb 9606 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9607 tmp2 = tcg_const_i32(sh);
9608 if (insn & (1 << 22))
9ef39277 9609 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9610 else
9ef39277 9611 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9612 tcg_temp_free_i32(tmp2);
6ddbc6e4 9613 store_reg(s, rd, tmp);
9ee6e8bb
PB
9614 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9615 /* Select bytes. */
6ddbc6e4
PB
9616 tmp = load_reg(s, rn);
9617 tmp2 = load_reg(s, rm);
7d1b0095 9618 tmp3 = tcg_temp_new_i32();
0ecb72a5 9619 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9620 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9621 tcg_temp_free_i32(tmp3);
9622 tcg_temp_free_i32(tmp2);
6ddbc6e4 9623 store_reg(s, rd, tmp);
9ee6e8bb 9624 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9625 tmp = load_reg(s, rm);
9ee6e8bb 9626 shift = (insn >> 10) & 3;
1301f322 9627 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9628 rotate, a shift is sufficient. */
9629 if (shift != 0)
f669df27 9630 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9631 op1 = (insn >> 20) & 7;
9632 switch (op1) {
5e3f878a
PB
9633 case 0: gen_sxtb16(tmp); break;
9634 case 2: gen_sxtb(tmp); break;
9635 case 3: gen_sxth(tmp); break;
9636 case 4: gen_uxtb16(tmp); break;
9637 case 6: gen_uxtb(tmp); break;
9638 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9639 default: goto illegal_op;
9640 }
9641 if (rn != 15) {
5e3f878a 9642 tmp2 = load_reg(s, rn);
9ee6e8bb 9643 if ((op1 & 3) == 0) {
5e3f878a 9644 gen_add16(tmp, tmp2);
9ee6e8bb 9645 } else {
5e3f878a 9646 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9647 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9648 }
9649 }
6c95676b 9650 store_reg(s, rd, tmp);
9ee6e8bb
PB
9651 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9652 /* rev */
b0109805 9653 tmp = load_reg(s, rm);
9ee6e8bb
PB
9654 if (insn & (1 << 22)) {
9655 if (insn & (1 << 7)) {
b0109805 9656 gen_revsh(tmp);
9ee6e8bb
PB
9657 } else {
9658 ARCH(6T2);
b0109805 9659 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9660 }
9661 } else {
9662 if (insn & (1 << 7))
b0109805 9663 gen_rev16(tmp);
9ee6e8bb 9664 else
66896cb8 9665 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9666 }
b0109805 9667 store_reg(s, rd, tmp);
9ee6e8bb
PB
9668 } else {
9669 goto illegal_op;
9670 }
9671 break;
9672 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9673 switch ((insn >> 20) & 0x7) {
9674 case 5:
9675 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9676 /* op2 not 00x or 11x : UNDEF */
9677 goto illegal_op;
9678 }
838fa72d
AJ
9679 /* Signed multiply most significant [accumulate].
9680 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9681 tmp = load_reg(s, rm);
9682 tmp2 = load_reg(s, rs);
a7812ae4 9683 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9684
955a7dd5 9685 if (rd != 15) {
838fa72d 9686 tmp = load_reg(s, rd);
9ee6e8bb 9687 if (insn & (1 << 6)) {
838fa72d 9688 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9689 } else {
838fa72d 9690 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9691 }
9692 }
838fa72d
AJ
9693 if (insn & (1 << 5)) {
9694 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9695 }
9696 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9697 tmp = tcg_temp_new_i32();
ecc7b3aa 9698 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9699 tcg_temp_free_i64(tmp64);
955a7dd5 9700 store_reg(s, rn, tmp);
41e9564d
PM
9701 break;
9702 case 0:
9703 case 4:
9704 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9705 if (insn & (1 << 7)) {
9706 goto illegal_op;
9707 }
9708 tmp = load_reg(s, rm);
9709 tmp2 = load_reg(s, rs);
9ee6e8bb 9710 if (insn & (1 << 5))
5e3f878a
PB
9711 gen_swap_half(tmp2);
9712 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9713 if (insn & (1 << 22)) {
5e3f878a 9714 /* smlald, smlsld */
33bbd75a
PC
9715 TCGv_i64 tmp64_2;
9716
a7812ae4 9717 tmp64 = tcg_temp_new_i64();
33bbd75a 9718 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9719 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9720 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9721 tcg_temp_free_i32(tmp);
33bbd75a
PC
9722 tcg_temp_free_i32(tmp2);
9723 if (insn & (1 << 6)) {
9724 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9725 } else {
9726 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9727 }
9728 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9729 gen_addq(s, tmp64, rd, rn);
9730 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9731 tcg_temp_free_i64(tmp64);
9ee6e8bb 9732 } else {
5e3f878a 9733 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9734 if (insn & (1 << 6)) {
9735 /* This subtraction cannot overflow. */
9736 tcg_gen_sub_i32(tmp, tmp, tmp2);
9737 } else {
9738 /* This addition cannot overflow 32 bits;
9739 * however it may overflow considered as a
9740 * signed operation, in which case we must set
9741 * the Q flag.
9742 */
9743 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9744 }
9745 tcg_temp_free_i32(tmp2);
22478e79 9746 if (rd != 15)
9ee6e8bb 9747 {
22478e79 9748 tmp2 = load_reg(s, rd);
9ef39277 9749 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9750 tcg_temp_free_i32(tmp2);
9ee6e8bb 9751 }
22478e79 9752 store_reg(s, rn, tmp);
9ee6e8bb 9753 }
41e9564d 9754 break;
b8b8ea05
PM
9755 case 1:
9756 case 3:
9757 /* SDIV, UDIV */
7e0cf8b4 9758 if (!dc_isar_feature(arm_div, s)) {
b8b8ea05
PM
9759 goto illegal_op;
9760 }
9761 if (((insn >> 5) & 7) || (rd != 15)) {
9762 goto illegal_op;
9763 }
9764 tmp = load_reg(s, rm);
9765 tmp2 = load_reg(s, rs);
9766 if (insn & (1 << 21)) {
9767 gen_helper_udiv(tmp, tmp, tmp2);
9768 } else {
9769 gen_helper_sdiv(tmp, tmp, tmp2);
9770 }
9771 tcg_temp_free_i32(tmp2);
9772 store_reg(s, rn, tmp);
9773 break;
41e9564d
PM
9774 default:
9775 goto illegal_op;
9ee6e8bb
PB
9776 }
9777 break;
9778 case 3:
9779 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9780 switch (op1) {
9781 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9782 ARCH(6);
9783 tmp = load_reg(s, rm);
9784 tmp2 = load_reg(s, rs);
9785 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9786 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9787 if (rd != 15) {
9788 tmp2 = load_reg(s, rd);
6ddbc6e4 9789 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9790 tcg_temp_free_i32(tmp2);
9ee6e8bb 9791 }
ded9d295 9792 store_reg(s, rn, tmp);
9ee6e8bb
PB
9793 break;
9794 case 0x20: case 0x24: case 0x28: case 0x2c:
9795 /* Bitfield insert/clear. */
9796 ARCH(6T2);
9797 shift = (insn >> 7) & 0x1f;
9798 i = (insn >> 16) & 0x1f;
45140a57
KB
9799 if (i < shift) {
9800 /* UNPREDICTABLE; we choose to UNDEF */
9801 goto illegal_op;
9802 }
9ee6e8bb
PB
9803 i = i + 1 - shift;
9804 if (rm == 15) {
7d1b0095 9805 tmp = tcg_temp_new_i32();
5e3f878a 9806 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9807 } else {
5e3f878a 9808 tmp = load_reg(s, rm);
9ee6e8bb
PB
9809 }
9810 if (i != 32) {
5e3f878a 9811 tmp2 = load_reg(s, rd);
d593c48e 9812 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9813 tcg_temp_free_i32(tmp2);
9ee6e8bb 9814 }
5e3f878a 9815 store_reg(s, rd, tmp);
9ee6e8bb
PB
9816 break;
9817 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9818 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9819 ARCH(6T2);
5e3f878a 9820 tmp = load_reg(s, rm);
9ee6e8bb
PB
9821 shift = (insn >> 7) & 0x1f;
9822 i = ((insn >> 16) & 0x1f) + 1;
9823 if (shift + i > 32)
9824 goto illegal_op;
9825 if (i < 32) {
9826 if (op1 & 0x20) {
59a71b4c 9827 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9828 } else {
59a71b4c 9829 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9830 }
9831 }
5e3f878a 9832 store_reg(s, rd, tmp);
9ee6e8bb
PB
9833 break;
9834 default:
9835 goto illegal_op;
9836 }
9837 break;
9838 }
9839 break;
9840 }
9841 do_ldst:
9842 /* Check for undefined extension instructions
9843 * per the ARM Bible IE:
9844 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9845 */
9846 sh = (0xf << 20) | (0xf << 4);
9847 if (op1 == 0x7 && ((insn & sh) == sh))
9848 {
9849 goto illegal_op;
9850 }
9851 /* load/store byte/word */
9852 rn = (insn >> 16) & 0xf;
9853 rd = (insn >> 12) & 0xf;
b0109805 9854 tmp2 = load_reg(s, rn);
a99caa48
PM
9855 if ((insn & 0x01200000) == 0x00200000) {
9856 /* ldrt/strt */
579d21cc 9857 i = get_a32_user_mem_index(s);
a99caa48
PM
9858 } else {
9859 i = get_mem_index(s);
9860 }
9ee6e8bb 9861 if (insn & (1 << 24))
b0109805 9862 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9863 if (insn & (1 << 20)) {
9864 /* load */
5a839c0d 9865 tmp = tcg_temp_new_i32();
9ee6e8bb 9866 if (insn & (1 << 22)) {
9bb6558a 9867 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9868 } else {
9bb6558a 9869 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9870 }
9ee6e8bb
PB
9871 } else {
9872 /* store */
b0109805 9873 tmp = load_reg(s, rd);
5a839c0d 9874 if (insn & (1 << 22)) {
9bb6558a 9875 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9876 } else {
9bb6558a 9877 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9878 }
9879 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9880 }
9881 if (!(insn & (1 << 24))) {
b0109805
PB
9882 gen_add_data_offset(s, insn, tmp2);
9883 store_reg(s, rn, tmp2);
9884 } else if (insn & (1 << 21)) {
9885 store_reg(s, rn, tmp2);
9886 } else {
7d1b0095 9887 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9888 }
9889 if (insn & (1 << 20)) {
9890 /* Complete the load. */
7dcc1f89 9891 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9892 }
9893 break;
9894 case 0x08:
9895 case 0x09:
9896 {
da3e53dd
PM
9897 int j, n, loaded_base;
9898 bool exc_return = false;
9899 bool is_load = extract32(insn, 20, 1);
9900 bool user = false;
39d5492a 9901 TCGv_i32 loaded_var;
9ee6e8bb
PB
9902 /* load/store multiple words */
9903 /* XXX: store correct base if write back */
9ee6e8bb 9904 if (insn & (1 << 22)) {
da3e53dd 9905 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9906 if (IS_USER(s))
9907 goto illegal_op; /* only usable in supervisor mode */
9908
da3e53dd
PM
9909 if (is_load && extract32(insn, 15, 1)) {
9910 exc_return = true;
9911 } else {
9912 user = true;
9913 }
9ee6e8bb
PB
9914 }
9915 rn = (insn >> 16) & 0xf;
b0109805 9916 addr = load_reg(s, rn);
9ee6e8bb
PB
9917
9918 /* compute total size */
9919 loaded_base = 0;
f764718d 9920 loaded_var = NULL;
9ee6e8bb
PB
9921 n = 0;
9922 for(i=0;i<16;i++) {
9923 if (insn & (1 << i))
9924 n++;
9925 }
9926 /* XXX: test invalid n == 0 case ? */
9927 if (insn & (1 << 23)) {
9928 if (insn & (1 << 24)) {
9929 /* pre increment */
b0109805 9930 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9931 } else {
9932 /* post increment */
9933 }
9934 } else {
9935 if (insn & (1 << 24)) {
9936 /* pre decrement */
b0109805 9937 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9938 } else {
9939 /* post decrement */
9940 if (n != 1)
b0109805 9941 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9942 }
9943 }
9944 j = 0;
9945 for(i=0;i<16;i++) {
9946 if (insn & (1 << i)) {
da3e53dd 9947 if (is_load) {
9ee6e8bb 9948 /* load */
5a839c0d 9949 tmp = tcg_temp_new_i32();
12dcc321 9950 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9951 if (user) {
b75263d6 9952 tmp2 = tcg_const_i32(i);
1ce94f81 9953 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9954 tcg_temp_free_i32(tmp2);
7d1b0095 9955 tcg_temp_free_i32(tmp);
9ee6e8bb 9956 } else if (i == rn) {
b0109805 9957 loaded_var = tmp;
9ee6e8bb 9958 loaded_base = 1;
fb0e8e79
PM
9959 } else if (rn == 15 && exc_return) {
9960 store_pc_exc_ret(s, tmp);
9ee6e8bb 9961 } else {
7dcc1f89 9962 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9963 }
9964 } else {
9965 /* store */
9966 if (i == 15) {
9967 /* special case: r15 = PC + 8 */
9968 val = (long)s->pc + 4;
7d1b0095 9969 tmp = tcg_temp_new_i32();
b0109805 9970 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9971 } else if (user) {
7d1b0095 9972 tmp = tcg_temp_new_i32();
b75263d6 9973 tmp2 = tcg_const_i32(i);
9ef39277 9974 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9975 tcg_temp_free_i32(tmp2);
9ee6e8bb 9976 } else {
b0109805 9977 tmp = load_reg(s, i);
9ee6e8bb 9978 }
12dcc321 9979 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9980 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9981 }
9982 j++;
9983 /* no need to add after the last transfer */
9984 if (j != n)
b0109805 9985 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9986 }
9987 }
9988 if (insn & (1 << 21)) {
9989 /* write back */
9990 if (insn & (1 << 23)) {
9991 if (insn & (1 << 24)) {
9992 /* pre increment */
9993 } else {
9994 /* post increment */
b0109805 9995 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9996 }
9997 } else {
9998 if (insn & (1 << 24)) {
9999 /* pre decrement */
10000 if (n != 1)
b0109805 10001 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
10002 } else {
10003 /* post decrement */
b0109805 10004 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
10005 }
10006 }
b0109805
PB
10007 store_reg(s, rn, addr);
10008 } else {
7d1b0095 10009 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10010 }
10011 if (loaded_base) {
b0109805 10012 store_reg(s, rn, loaded_var);
9ee6e8bb 10013 }
da3e53dd 10014 if (exc_return) {
9ee6e8bb 10015 /* Restore CPSR from SPSR. */
d9ba4830 10016 tmp = load_cpu_field(spsr);
e69ad9df
AL
10017 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10018 gen_io_start();
10019 }
235ea1f5 10020 gen_helper_cpsr_write_eret(cpu_env, tmp);
e69ad9df
AL
10021 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10022 gen_io_end();
10023 }
7d1b0095 10024 tcg_temp_free_i32(tmp);
b29fd33d 10025 /* Must exit loop to check un-masked IRQs */
dcba3a8d 10026 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
10027 }
10028 }
10029 break;
10030 case 0xa:
10031 case 0xb:
10032 {
10033 int32_t offset;
10034
10035 /* branch (and link) */
10036 val = (int32_t)s->pc;
10037 if (insn & (1 << 24)) {
7d1b0095 10038 tmp = tcg_temp_new_i32();
5e3f878a
PB
10039 tcg_gen_movi_i32(tmp, val);
10040 store_reg(s, 14, tmp);
9ee6e8bb 10041 }
534df156
PM
10042 offset = sextract32(insn << 2, 0, 26);
10043 val += offset + 4;
9ee6e8bb
PB
10044 gen_jmp(s, val);
10045 }
10046 break;
10047 case 0xc:
10048 case 0xd:
10049 case 0xe:
6a57f3eb
WN
10050 if (((insn >> 8) & 0xe) == 10) {
10051 /* VFP. */
7dcc1f89 10052 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10053 goto illegal_op;
10054 }
7dcc1f89 10055 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 10056 /* Coprocessor. */
9ee6e8bb 10057 goto illegal_op;
6a57f3eb 10058 }
9ee6e8bb
PB
10059 break;
10060 case 0xf:
10061 /* swi */
eaed129d 10062 gen_set_pc_im(s, s->pc);
d4a2dc67 10063 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 10064 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
10065 break;
10066 default:
10067 illegal_op:
73710361
GB
10068 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10069 default_exception_el(s));
9ee6e8bb
PB
10070 break;
10071 }
10072 }
10073}
10074
296e5a0a
PM
10075static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
10076{
10077 /* Return true if this is a 16 bit instruction. We must be precise
10078 * about this (matching the decode). We assume that s->pc still
10079 * points to the first 16 bits of the insn.
10080 */
10081 if ((insn >> 11) < 0x1d) {
10082 /* Definitely a 16-bit instruction */
10083 return true;
10084 }
10085
10086 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10087 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10088 * end up actually treating this as two 16-bit insns, though,
10089 * if it's half of a bl/blx pair that might span a page boundary.
10090 */
14120108
JS
10091 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10092 arm_dc_feature(s, ARM_FEATURE_M)) {
296e5a0a
PM
10093 /* Thumb2 cores (including all M profile ones) always treat
10094 * 32-bit insns as 32-bit.
10095 */
10096 return false;
10097 }
10098
bfe7ad5b 10099 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
296e5a0a
PM
10100 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10101 * is not on the next page; we merge this into a 32-bit
10102 * insn.
10103 */
10104 return false;
10105 }
10106 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10107 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10108 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10109 * -- handle as single 16 bit insn
10110 */
10111 return true;
10112}
10113
9ee6e8bb
PB
/* Return nonzero if 'op' encodes a Thumb-2 logical operation (ops 0-7). */
static int
thumb2_logic_op(int op)
{
    return op < 8 ? 1 : 0;
}
10120
10121/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
10122 then set condition code flags based on the result of the operation.
10123 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
10124 to the high bit of T1.
10125 Returns zero if the opcode is valid. */
10126
10127static int
39d5492a
PM
10128gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
10129 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
10130{
10131 int logic_cc;
10132
10133 logic_cc = 0;
10134 switch (op) {
10135 case 0: /* and */
396e467c 10136 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
10137 logic_cc = conds;
10138 break;
10139 case 1: /* bic */
f669df27 10140 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
10141 logic_cc = conds;
10142 break;
10143 case 2: /* orr */
396e467c 10144 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
10145 logic_cc = conds;
10146 break;
10147 case 3: /* orn */
29501f1b 10148 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
10149 logic_cc = conds;
10150 break;
10151 case 4: /* eor */
396e467c 10152 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
10153 logic_cc = conds;
10154 break;
10155 case 8: /* add */
10156 if (conds)
72485ec4 10157 gen_add_CC(t0, t0, t1);
9ee6e8bb 10158 else
396e467c 10159 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
10160 break;
10161 case 10: /* adc */
10162 if (conds)
49b4c31e 10163 gen_adc_CC(t0, t0, t1);
9ee6e8bb 10164 else
396e467c 10165 gen_adc(t0, t1);
9ee6e8bb
PB
10166 break;
10167 case 11: /* sbc */
2de68a49
RH
10168 if (conds) {
10169 gen_sbc_CC(t0, t0, t1);
10170 } else {
396e467c 10171 gen_sub_carry(t0, t0, t1);
2de68a49 10172 }
9ee6e8bb
PB
10173 break;
10174 case 13: /* sub */
10175 if (conds)
72485ec4 10176 gen_sub_CC(t0, t0, t1);
9ee6e8bb 10177 else
396e467c 10178 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
10179 break;
10180 case 14: /* rsb */
10181 if (conds)
72485ec4 10182 gen_sub_CC(t0, t1, t0);
9ee6e8bb 10183 else
396e467c 10184 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
10185 break;
10186 default: /* 5, 6, 7, 9, 12, 15. */
10187 return 1;
10188 }
10189 if (logic_cc) {
396e467c 10190 gen_logic_CC(t0);
9ee6e8bb 10191 if (shifter_out)
396e467c 10192 gen_set_CF_bit31(t1);
9ee6e8bb
PB
10193 }
10194 return 0;
10195}
10196
2eea841c
PM
10197/* Translate a 32-bit thumb instruction. */
10198static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 10199{
296e5a0a 10200 uint32_t imm, shift, offset;
9ee6e8bb 10201 uint32_t rd, rn, rm, rs;
39d5492a
PM
10202 TCGv_i32 tmp;
10203 TCGv_i32 tmp2;
10204 TCGv_i32 tmp3;
10205 TCGv_i32 addr;
a7812ae4 10206 TCGv_i64 tmp64;
9ee6e8bb
PB
10207 int op;
10208 int shiftop;
10209 int conds;
10210 int logic_cc;
10211
14120108
JS
10212 /*
10213 * ARMv6-M supports a limited subset of Thumb2 instructions.
10214 * Other Thumb1 architectures allow only 32-bit
10215 * combined BL/BLX prefix and suffix.
296e5a0a 10216 */
14120108
JS
10217 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10218 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10219 int i;
10220 bool found = false;
8297cb13
JS
10221 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10222 0xf3b08040 /* dsb */,
10223 0xf3b08050 /* dmb */,
10224 0xf3b08060 /* isb */,
10225 0xf3e08000 /* mrs */,
10226 0xf000d000 /* bl */};
10227 static const uint32_t armv6m_mask[] = {0xffe0d000,
10228 0xfff0d0f0,
10229 0xfff0d0f0,
10230 0xfff0d0f0,
10231 0xffe0d000,
10232 0xf800d000};
14120108
JS
10233
10234 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10235 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10236 found = true;
10237 break;
10238 }
10239 }
10240 if (!found) {
10241 goto illegal_op;
10242 }
10243 } else if ((insn & 0xf800e800) != 0xf000e800) {
9ee6e8bb
PB
10244 ARCH(6T2);
10245 }
10246
10247 rn = (insn >> 16) & 0xf;
10248 rs = (insn >> 12) & 0xf;
10249 rd = (insn >> 8) & 0xf;
10250 rm = insn & 0xf;
10251 switch ((insn >> 25) & 0xf) {
10252 case 0: case 1: case 2: case 3:
10253 /* 16-bit instructions. Should never happen. */
10254 abort();
10255 case 4:
10256 if (insn & (1 << 22)) {
ebfe27c5
PM
10257 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10258 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 10259 * table branch, TT.
ebfe27c5 10260 */
76eff04d
PM
10261 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10262 arm_dc_feature(s, ARM_FEATURE_V8)) {
10263 /* 0b1110_1001_0111_1111_1110_1001_0111_111
10264 * - SG (v8M only)
10265 * The bulk of the behaviour for this instruction is implemented
10266 * in v7m_handle_execute_nsc(), which deals with the insn when
10267 * it is executed by a CPU in non-secure state from memory
10268 * which is Secure & NonSecure-Callable.
10269 * Here we only need to handle the remaining cases:
10270 * * in NS memory (including the "security extension not
10271 * implemented" case) : NOP
10272 * * in S memory but CPU already secure (clear IT bits)
10273 * We know that the attribute for the memory this insn is
10274 * in must match the current CPU state, because otherwise
10275 * get_phys_addr_pmsav8 would have generated an exception.
10276 */
10277 if (s->v8m_secure) {
10278 /* Like the IT insn, we don't need to generate any code */
10279 s->condexec_cond = 0;
10280 s->condexec_mask = 0;
10281 }
10282 } else if (insn & 0x01200000) {
ebfe27c5
PM
10283 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10284 * - load/store dual (post-indexed)
10285 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10286 * - load/store dual (literal and immediate)
10287 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10288 * - load/store dual (pre-indexed)
10289 */
910d7692
PM
10290 bool wback = extract32(insn, 21, 1);
10291
9ee6e8bb 10292 if (rn == 15) {
ebfe27c5
PM
10293 if (insn & (1 << 21)) {
10294 /* UNPREDICTABLE */
10295 goto illegal_op;
10296 }
7d1b0095 10297 addr = tcg_temp_new_i32();
b0109805 10298 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 10299 } else {
b0109805 10300 addr = load_reg(s, rn);
9ee6e8bb
PB
10301 }
10302 offset = (insn & 0xff) * 4;
910d7692 10303 if ((insn & (1 << 23)) == 0) {
9ee6e8bb 10304 offset = -offset;
910d7692
PM
10305 }
10306
10307 if (s->v8m_stackcheck && rn == 13 && wback) {
10308 /*
10309 * Here 'addr' is the current SP; if offset is +ve we're
10310 * moving SP up, else down. It is UNKNOWN whether the limit
10311 * check triggers when SP starts below the limit and ends
10312 * up above it; check whichever of the current and final
10313 * SP is lower, so QEMU will trigger in that situation.
10314 */
10315 if ((int32_t)offset < 0) {
10316 TCGv_i32 newsp = tcg_temp_new_i32();
10317
10318 tcg_gen_addi_i32(newsp, addr, offset);
10319 gen_helper_v8m_stackcheck(cpu_env, newsp);
10320 tcg_temp_free_i32(newsp);
10321 } else {
10322 gen_helper_v8m_stackcheck(cpu_env, addr);
10323 }
10324 }
10325
9ee6e8bb 10326 if (insn & (1 << 24)) {
b0109805 10327 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
10328 offset = 0;
10329 }
10330 if (insn & (1 << 20)) {
10331 /* ldrd */
e2592fad 10332 tmp = tcg_temp_new_i32();
12dcc321 10333 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
10334 store_reg(s, rs, tmp);
10335 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10336 tmp = tcg_temp_new_i32();
12dcc321 10337 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10338 store_reg(s, rd, tmp);
9ee6e8bb
PB
10339 } else {
10340 /* strd */
b0109805 10341 tmp = load_reg(s, rs);
12dcc321 10342 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10343 tcg_temp_free_i32(tmp);
b0109805
PB
10344 tcg_gen_addi_i32(addr, addr, 4);
10345 tmp = load_reg(s, rd);
12dcc321 10346 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10347 tcg_temp_free_i32(tmp);
9ee6e8bb 10348 }
910d7692 10349 if (wback) {
9ee6e8bb 10350 /* Base writeback. */
b0109805
PB
10351 tcg_gen_addi_i32(addr, addr, offset - 4);
10352 store_reg(s, rn, addr);
10353 } else {
7d1b0095 10354 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10355 }
10356 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
10357 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
10358 * - load/store exclusive word
5158de24 10359 * - TT (v8M only)
ebfe27c5
PM
10360 */
10361 if (rs == 15) {
5158de24
PM
10362 if (!(insn & (1 << 20)) &&
10363 arm_dc_feature(s, ARM_FEATURE_M) &&
10364 arm_dc_feature(s, ARM_FEATURE_V8)) {
10365 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
10366 * - TT (v8M only)
10367 */
10368 bool alt = insn & (1 << 7);
10369 TCGv_i32 addr, op, ttresp;
10370
10371 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
10372 /* we UNDEF for these UNPREDICTABLE cases */
10373 goto illegal_op;
10374 }
10375
10376 if (alt && !s->v8m_secure) {
10377 goto illegal_op;
10378 }
10379
10380 addr = load_reg(s, rn);
10381 op = tcg_const_i32(extract32(insn, 6, 2));
10382 ttresp = tcg_temp_new_i32();
10383 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
10384 tcg_temp_free_i32(addr);
10385 tcg_temp_free_i32(op);
10386 store_reg(s, rd, ttresp);
384c6c03 10387 break;
5158de24 10388 }
ebfe27c5
PM
10389 goto illegal_op;
10390 }
39d5492a 10391 addr = tcg_temp_local_new_i32();
98a46317 10392 load_reg_var(s, addr, rn);
426f5abc 10393 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 10394 if (insn & (1 << 20)) {
426f5abc 10395 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 10396 } else {
426f5abc 10397 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 10398 }
39d5492a 10399 tcg_temp_free_i32(addr);
2359bf80 10400 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
10401 /* Table Branch. */
10402 if (rn == 15) {
7d1b0095 10403 addr = tcg_temp_new_i32();
b0109805 10404 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 10405 } else {
b0109805 10406 addr = load_reg(s, rn);
9ee6e8bb 10407 }
b26eefb6 10408 tmp = load_reg(s, rm);
b0109805 10409 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
10410 if (insn & (1 << 4)) {
10411 /* tbh */
b0109805 10412 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10413 tcg_temp_free_i32(tmp);
e2592fad 10414 tmp = tcg_temp_new_i32();
12dcc321 10415 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10416 } else { /* tbb */
7d1b0095 10417 tcg_temp_free_i32(tmp);
e2592fad 10418 tmp = tcg_temp_new_i32();
12dcc321 10419 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10420 }
7d1b0095 10421 tcg_temp_free_i32(addr);
b0109805
PB
10422 tcg_gen_shli_i32(tmp, tmp, 1);
10423 tcg_gen_addi_i32(tmp, tmp, s->pc);
10424 store_reg(s, 15, tmp);
9ee6e8bb 10425 } else {
2359bf80 10426 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 10427 op = (insn >> 4) & 0x3;
2359bf80
MR
10428 switch (op2) {
10429 case 0:
426f5abc 10430 goto illegal_op;
2359bf80
MR
10431 case 1:
10432 /* Load/store exclusive byte/halfword/doubleword */
10433 if (op == 2) {
10434 goto illegal_op;
10435 }
10436 ARCH(7);
10437 break;
10438 case 2:
10439 /* Load-acquire/store-release */
10440 if (op == 3) {
10441 goto illegal_op;
10442 }
10443 /* Fall through */
10444 case 3:
10445 /* Load-acquire/store-release exclusive */
10446 ARCH(8);
10447 break;
426f5abc 10448 }
39d5492a 10449 addr = tcg_temp_local_new_i32();
98a46317 10450 load_reg_var(s, addr, rn);
2359bf80
MR
10451 if (!(op2 & 1)) {
10452 if (insn & (1 << 20)) {
10453 tmp = tcg_temp_new_i32();
10454 switch (op) {
10455 case 0: /* ldab */
9bb6558a
PM
10456 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
10457 rs | ISSIsAcqRel);
2359bf80
MR
10458 break;
10459 case 1: /* ldah */
9bb6558a
PM
10460 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10461 rs | ISSIsAcqRel);
2359bf80
MR
10462 break;
10463 case 2: /* lda */
9bb6558a
PM
10464 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
10465 rs | ISSIsAcqRel);
2359bf80
MR
10466 break;
10467 default:
10468 abort();
10469 }
10470 store_reg(s, rs, tmp);
10471 } else {
10472 tmp = load_reg(s, rs);
10473 switch (op) {
10474 case 0: /* stlb */
9bb6558a
PM
10475 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
10476 rs | ISSIsAcqRel);
2359bf80
MR
10477 break;
10478 case 1: /* stlh */
9bb6558a
PM
10479 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
10480 rs | ISSIsAcqRel);
2359bf80
MR
10481 break;
10482 case 2: /* stl */
9bb6558a
PM
10483 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
10484 rs | ISSIsAcqRel);
2359bf80
MR
10485 break;
10486 default:
10487 abort();
10488 }
10489 tcg_temp_free_i32(tmp);
10490 }
10491 } else if (insn & (1 << 20)) {
426f5abc 10492 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 10493 } else {
426f5abc 10494 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 10495 }
39d5492a 10496 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10497 }
10498 } else {
10499 /* Load/store multiple, RFE, SRS. */
10500 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 10501 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 10502 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10503 goto illegal_op;
00115976 10504 }
9ee6e8bb
PB
10505 if (insn & (1 << 20)) {
10506 /* rfe */
b0109805
PB
10507 addr = load_reg(s, rn);
10508 if ((insn & (1 << 24)) == 0)
10509 tcg_gen_addi_i32(addr, addr, -8);
10510 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 10511 tmp = tcg_temp_new_i32();
12dcc321 10512 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10513 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10514 tmp2 = tcg_temp_new_i32();
12dcc321 10515 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
10516 if (insn & (1 << 21)) {
10517 /* Base writeback. */
b0109805
PB
10518 if (insn & (1 << 24)) {
10519 tcg_gen_addi_i32(addr, addr, 4);
10520 } else {
10521 tcg_gen_addi_i32(addr, addr, -4);
10522 }
10523 store_reg(s, rn, addr);
10524 } else {
7d1b0095 10525 tcg_temp_free_i32(addr);
9ee6e8bb 10526 }
b0109805 10527 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
10528 } else {
10529 /* srs */
81465888
PM
10530 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10531 insn & (1 << 21));
9ee6e8bb
PB
10532 }
10533 } else {
5856d44e 10534 int i, loaded_base = 0;
39d5492a 10535 TCGv_i32 loaded_var;
7c0ed88e 10536 bool wback = extract32(insn, 21, 1);
9ee6e8bb 10537 /* Load/store multiple. */
b0109805 10538 addr = load_reg(s, rn);
9ee6e8bb
PB
10539 offset = 0;
10540 for (i = 0; i < 16; i++) {
10541 if (insn & (1 << i))
10542 offset += 4;
10543 }
7c0ed88e 10544
9ee6e8bb 10545 if (insn & (1 << 24)) {
b0109805 10546 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10547 }
10548
7c0ed88e
PM
10549 if (s->v8m_stackcheck && rn == 13 && wback) {
10550 /*
10551 * If the writeback is incrementing SP rather than
10552 * decrementing it, and the initial SP is below the
10553 * stack limit but the final written-back SP would
10554 * be above, then then we must not perform any memory
10555 * accesses, but it is IMPDEF whether we generate
10556 * an exception. We choose to do so in this case.
10557 * At this point 'addr' is the lowest address, so
10558 * either the original SP (if incrementing) or our
10559 * final SP (if decrementing), so that's what we check.
10560 */
10561 gen_helper_v8m_stackcheck(cpu_env, addr);
10562 }
10563
f764718d 10564 loaded_var = NULL;
9ee6e8bb
PB
10565 for (i = 0; i < 16; i++) {
10566 if ((insn & (1 << i)) == 0)
10567 continue;
10568 if (insn & (1 << 20)) {
10569 /* Load. */
e2592fad 10570 tmp = tcg_temp_new_i32();
12dcc321 10571 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10572 if (i == 15) {
3bb8a96f 10573 gen_bx_excret(s, tmp);
5856d44e
YO
10574 } else if (i == rn) {
10575 loaded_var = tmp;
10576 loaded_base = 1;
9ee6e8bb 10577 } else {
b0109805 10578 store_reg(s, i, tmp);
9ee6e8bb
PB
10579 }
10580 } else {
10581 /* Store. */
b0109805 10582 tmp = load_reg(s, i);
12dcc321 10583 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10584 tcg_temp_free_i32(tmp);
9ee6e8bb 10585 }
b0109805 10586 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 10587 }
5856d44e
YO
10588 if (loaded_base) {
10589 store_reg(s, rn, loaded_var);
10590 }
7c0ed88e 10591 if (wback) {
9ee6e8bb
PB
10592 /* Base register writeback. */
10593 if (insn & (1 << 24)) {
b0109805 10594 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10595 }
10596 /* Fault if writeback register is in register list. */
10597 if (insn & (1 << rn))
10598 goto illegal_op;
b0109805
PB
10599 store_reg(s, rn, addr);
10600 } else {
7d1b0095 10601 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10602 }
10603 }
10604 }
10605 break;
2af9ab77
JB
10606 case 5:
10607
9ee6e8bb 10608 op = (insn >> 21) & 0xf;
2af9ab77 10609 if (op == 6) {
62b44f05
AR
10610 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10611 goto illegal_op;
10612 }
2af9ab77
JB
10613 /* Halfword pack. */
10614 tmp = load_reg(s, rn);
10615 tmp2 = load_reg(s, rm);
10616 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10617 if (insn & (1 << 5)) {
10618 /* pkhtb */
10619 if (shift == 0)
10620 shift = 31;
10621 tcg_gen_sari_i32(tmp2, tmp2, shift);
10622 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10623 tcg_gen_ext16u_i32(tmp2, tmp2);
10624 } else {
10625 /* pkhbt */
10626 if (shift)
10627 tcg_gen_shli_i32(tmp2, tmp2, shift);
10628 tcg_gen_ext16u_i32(tmp, tmp);
10629 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10630 }
10631 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10632 tcg_temp_free_i32(tmp2);
3174f8e9
FN
10633 store_reg(s, rd, tmp);
10634 } else {
2af9ab77
JB
10635 /* Data processing register constant shift. */
10636 if (rn == 15) {
7d1b0095 10637 tmp = tcg_temp_new_i32();
2af9ab77
JB
10638 tcg_gen_movi_i32(tmp, 0);
10639 } else {
10640 tmp = load_reg(s, rn);
10641 }
10642 tmp2 = load_reg(s, rm);
10643
10644 shiftop = (insn >> 4) & 3;
10645 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10646 conds = (insn & (1 << 20)) != 0;
10647 logic_cc = (conds && thumb2_logic_op(op));
10648 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10649 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10650 goto illegal_op;
7d1b0095 10651 tcg_temp_free_i32(tmp2);
55203189
PM
10652 if (rd == 13 &&
10653 ((op == 2 && rn == 15) ||
10654 (op == 8 && rn == 13) ||
10655 (op == 13 && rn == 13))) {
10656 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
10657 store_sp_checked(s, tmp);
10658 } else if (rd != 15) {
2af9ab77
JB
10659 store_reg(s, rd, tmp);
10660 } else {
7d1b0095 10661 tcg_temp_free_i32(tmp);
2af9ab77 10662 }
3174f8e9 10663 }
9ee6e8bb
PB
10664 break;
10665 case 13: /* Misc data processing. */
10666 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10667 if (op < 4 && (insn & 0xf000) != 0xf000)
10668 goto illegal_op;
10669 switch (op) {
10670 case 0: /* Register controlled shift. */
8984bd2e
PB
10671 tmp = load_reg(s, rn);
10672 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10673 if ((insn & 0x70) != 0)
10674 goto illegal_op;
a2d12f0f
PM
10675 /*
10676 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
10677 * - MOV, MOVS (register-shifted register), flagsetting
10678 */
9ee6e8bb 10679 op = (insn >> 21) & 3;
8984bd2e
PB
10680 logic_cc = (insn & (1 << 20)) != 0;
10681 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10682 if (logic_cc)
10683 gen_logic_CC(tmp);
bedb8a6b 10684 store_reg(s, rd, tmp);
9ee6e8bb
PB
10685 break;
10686 case 1: /* Sign/zero extend. */
62b44f05
AR
10687 op = (insn >> 20) & 7;
10688 switch (op) {
10689 case 0: /* SXTAH, SXTH */
10690 case 1: /* UXTAH, UXTH */
10691 case 4: /* SXTAB, SXTB */
10692 case 5: /* UXTAB, UXTB */
10693 break;
10694 case 2: /* SXTAB16, SXTB16 */
10695 case 3: /* UXTAB16, UXTB16 */
10696 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10697 goto illegal_op;
10698 }
10699 break;
10700 default:
10701 goto illegal_op;
10702 }
10703 if (rn != 15) {
10704 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10705 goto illegal_op;
10706 }
10707 }
5e3f878a 10708 tmp = load_reg(s, rm);
9ee6e8bb 10709 shift = (insn >> 4) & 3;
1301f322 10710 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10711 rotate, a shift is sufficient. */
10712 if (shift != 0)
f669df27 10713 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10714 op = (insn >> 20) & 7;
10715 switch (op) {
5e3f878a
PB
10716 case 0: gen_sxth(tmp); break;
10717 case 1: gen_uxth(tmp); break;
10718 case 2: gen_sxtb16(tmp); break;
10719 case 3: gen_uxtb16(tmp); break;
10720 case 4: gen_sxtb(tmp); break;
10721 case 5: gen_uxtb(tmp); break;
62b44f05
AR
10722 default:
10723 g_assert_not_reached();
9ee6e8bb
PB
10724 }
10725 if (rn != 15) {
5e3f878a 10726 tmp2 = load_reg(s, rn);
9ee6e8bb 10727 if ((op >> 1) == 1) {
5e3f878a 10728 gen_add16(tmp, tmp2);
9ee6e8bb 10729 } else {
5e3f878a 10730 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10731 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10732 }
10733 }
5e3f878a 10734 store_reg(s, rd, tmp);
9ee6e8bb
PB
10735 break;
10736 case 2: /* SIMD add/subtract. */
62b44f05
AR
10737 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10738 goto illegal_op;
10739 }
9ee6e8bb
PB
10740 op = (insn >> 20) & 7;
10741 shift = (insn >> 4) & 7;
10742 if ((op & 3) == 3 || (shift & 3) == 3)
10743 goto illegal_op;
6ddbc6e4
PB
10744 tmp = load_reg(s, rn);
10745 tmp2 = load_reg(s, rm);
10746 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10747 tcg_temp_free_i32(tmp2);
6ddbc6e4 10748 store_reg(s, rd, tmp);
9ee6e8bb
PB
10749 break;
10750 case 3: /* Other data processing. */
10751 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10752 if (op < 4) {
10753 /* Saturating add/subtract. */
62b44f05
AR
10754 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10755 goto illegal_op;
10756 }
d9ba4830
PB
10757 tmp = load_reg(s, rn);
10758 tmp2 = load_reg(s, rm);
9ee6e8bb 10759 if (op & 1)
9ef39277 10760 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10761 if (op & 2)
9ef39277 10762 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10763 else
9ef39277 10764 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10765 tcg_temp_free_i32(tmp2);
9ee6e8bb 10766 } else {
62b44f05
AR
10767 switch (op) {
10768 case 0x0a: /* rbit */
10769 case 0x08: /* rev */
10770 case 0x09: /* rev16 */
10771 case 0x0b: /* revsh */
10772 case 0x18: /* clz */
10773 break;
10774 case 0x10: /* sel */
10775 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10776 goto illegal_op;
10777 }
10778 break;
10779 case 0x20: /* crc32/crc32c */
10780 case 0x21:
10781 case 0x22:
10782 case 0x28:
10783 case 0x29:
10784 case 0x2a:
962fcbf2 10785 if (!dc_isar_feature(aa32_crc32, s)) {
62b44f05
AR
10786 goto illegal_op;
10787 }
10788 break;
10789 default:
10790 goto illegal_op;
10791 }
d9ba4830 10792 tmp = load_reg(s, rn);
9ee6e8bb
PB
10793 switch (op) {
10794 case 0x0a: /* rbit */
d9ba4830 10795 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10796 break;
10797 case 0x08: /* rev */
66896cb8 10798 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10799 break;
10800 case 0x09: /* rev16 */
d9ba4830 10801 gen_rev16(tmp);
9ee6e8bb
PB
10802 break;
10803 case 0x0b: /* revsh */
d9ba4830 10804 gen_revsh(tmp);
9ee6e8bb
PB
10805 break;
10806 case 0x10: /* sel */
d9ba4830 10807 tmp2 = load_reg(s, rm);
7d1b0095 10808 tmp3 = tcg_temp_new_i32();
0ecb72a5 10809 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10810 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10811 tcg_temp_free_i32(tmp3);
10812 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10813 break;
10814 case 0x18: /* clz */
7539a012 10815 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10816 break;
eb0ecd5a
WN
10817 case 0x20:
10818 case 0x21:
10819 case 0x22:
10820 case 0x28:
10821 case 0x29:
10822 case 0x2a:
10823 {
10824 /* crc32/crc32c */
10825 uint32_t sz = op & 0x3;
10826 uint32_t c = op & 0x8;
10827
eb0ecd5a 10828 tmp2 = load_reg(s, rm);
aa633469
PM
10829 if (sz == 0) {
10830 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10831 } else if (sz == 1) {
10832 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10833 }
eb0ecd5a
WN
10834 tmp3 = tcg_const_i32(1 << sz);
10835 if (c) {
10836 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10837 } else {
10838 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10839 }
10840 tcg_temp_free_i32(tmp2);
10841 tcg_temp_free_i32(tmp3);
10842 break;
10843 }
9ee6e8bb 10844 default:
62b44f05 10845 g_assert_not_reached();
9ee6e8bb
PB
10846 }
10847 }
d9ba4830 10848 store_reg(s, rd, tmp);
9ee6e8bb
PB
10849 break;
10850 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10851 switch ((insn >> 20) & 7) {
10852 case 0: /* 32 x 32 -> 32 */
10853 case 7: /* Unsigned sum of absolute differences. */
10854 break;
10855 case 1: /* 16 x 16 -> 32 */
10856 case 2: /* Dual multiply add. */
10857 case 3: /* 32 * 16 -> 32msb */
10858 case 4: /* Dual multiply subtract. */
10859 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10860 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10861 goto illegal_op;
10862 }
10863 break;
10864 }
9ee6e8bb 10865 op = (insn >> 4) & 0xf;
d9ba4830
PB
10866 tmp = load_reg(s, rn);
10867 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10868 switch ((insn >> 20) & 7) {
10869 case 0: /* 32 x 32 -> 32 */
d9ba4830 10870 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10871 tcg_temp_free_i32(tmp2);
9ee6e8bb 10872 if (rs != 15) {
d9ba4830 10873 tmp2 = load_reg(s, rs);
9ee6e8bb 10874 if (op)
d9ba4830 10875 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10876 else
d9ba4830 10877 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10878 tcg_temp_free_i32(tmp2);
9ee6e8bb 10879 }
9ee6e8bb
PB
10880 break;
10881 case 1: /* 16 x 16 -> 32 */
d9ba4830 10882 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10883 tcg_temp_free_i32(tmp2);
9ee6e8bb 10884 if (rs != 15) {
d9ba4830 10885 tmp2 = load_reg(s, rs);
9ef39277 10886 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10887 tcg_temp_free_i32(tmp2);
9ee6e8bb 10888 }
9ee6e8bb
PB
10889 break;
10890 case 2: /* Dual multiply add. */
10891 case 4: /* Dual multiply subtract. */
10892 if (op)
d9ba4830
PB
10893 gen_swap_half(tmp2);
10894 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10895 if (insn & (1 << 22)) {
e1d177b9 10896 /* This subtraction cannot overflow. */
d9ba4830 10897 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10898 } else {
e1d177b9
PM
10899 /* This addition cannot overflow 32 bits;
10900 * however it may overflow considered as a signed
10901 * operation, in which case we must set the Q flag.
10902 */
9ef39277 10903 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10904 }
7d1b0095 10905 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10906 if (rs != 15)
10907 {
d9ba4830 10908 tmp2 = load_reg(s, rs);
9ef39277 10909 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10910 tcg_temp_free_i32(tmp2);
9ee6e8bb 10911 }
9ee6e8bb
PB
10912 break;
10913 case 3: /* 32 * 16 -> 32msb */
10914 if (op)
d9ba4830 10915 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10916 else
d9ba4830 10917 gen_sxth(tmp2);
a7812ae4
PB
10918 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10919 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10920 tmp = tcg_temp_new_i32();
ecc7b3aa 10921 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10922 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10923 if (rs != 15)
10924 {
d9ba4830 10925 tmp2 = load_reg(s, rs);
9ef39277 10926 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10927 tcg_temp_free_i32(tmp2);
9ee6e8bb 10928 }
9ee6e8bb 10929 break;
838fa72d
AJ
10930 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10931 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10932 if (rs != 15) {
838fa72d
AJ
10933 tmp = load_reg(s, rs);
10934 if (insn & (1 << 20)) {
10935 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10936 } else {
838fa72d 10937 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10938 }
2c0262af 10939 }
838fa72d
AJ
10940 if (insn & (1 << 4)) {
10941 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10942 }
10943 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10944 tmp = tcg_temp_new_i32();
ecc7b3aa 10945 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10946 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10947 break;
10948 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10949 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10950 tcg_temp_free_i32(tmp2);
9ee6e8bb 10951 if (rs != 15) {
d9ba4830
PB
10952 tmp2 = load_reg(s, rs);
10953 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10954 tcg_temp_free_i32(tmp2);
5fd46862 10955 }
9ee6e8bb 10956 break;
2c0262af 10957 }
d9ba4830 10958 store_reg(s, rd, tmp);
2c0262af 10959 break;
9ee6e8bb
PB
10960 case 6: case 7: /* 64-bit multiply, Divide. */
10961 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10962 tmp = load_reg(s, rn);
10963 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10964 if ((op & 0x50) == 0x10) {
10965 /* sdiv, udiv */
7e0cf8b4 10966 if (!dc_isar_feature(thumb_div, s)) {
9ee6e8bb 10967 goto illegal_op;
47789990 10968 }
9ee6e8bb 10969 if (op & 0x20)
5e3f878a 10970 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10971 else
5e3f878a 10972 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10973 tcg_temp_free_i32(tmp2);
5e3f878a 10974 store_reg(s, rd, tmp);
9ee6e8bb
PB
10975 } else if ((op & 0xe) == 0xc) {
10976 /* Dual multiply accumulate long. */
62b44f05
AR
10977 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10978 tcg_temp_free_i32(tmp);
10979 tcg_temp_free_i32(tmp2);
10980 goto illegal_op;
10981 }
9ee6e8bb 10982 if (op & 1)
5e3f878a
PB
10983 gen_swap_half(tmp2);
10984 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10985 if (op & 0x10) {
5e3f878a 10986 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10987 } else {
5e3f878a 10988 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10989 }
7d1b0095 10990 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10991 /* BUGFIX */
10992 tmp64 = tcg_temp_new_i64();
10993 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10994 tcg_temp_free_i32(tmp);
a7812ae4
PB
10995 gen_addq(s, tmp64, rs, rd);
10996 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10997 tcg_temp_free_i64(tmp64);
2c0262af 10998 } else {
9ee6e8bb
PB
10999 if (op & 0x20) {
11000 /* Unsigned 64-bit multiply */
a7812ae4 11001 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 11002 } else {
9ee6e8bb
PB
11003 if (op & 8) {
11004 /* smlalxy */
62b44f05
AR
11005 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11006 tcg_temp_free_i32(tmp2);
11007 tcg_temp_free_i32(tmp);
11008 goto illegal_op;
11009 }
5e3f878a 11010 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 11011 tcg_temp_free_i32(tmp2);
a7812ae4
PB
11012 tmp64 = tcg_temp_new_i64();
11013 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 11014 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11015 } else {
11016 /* Signed 64-bit multiply */
a7812ae4 11017 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 11018 }
b5ff1b31 11019 }
9ee6e8bb
PB
11020 if (op & 4) {
11021 /* umaal */
62b44f05
AR
11022 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11023 tcg_temp_free_i64(tmp64);
11024 goto illegal_op;
11025 }
a7812ae4
PB
11026 gen_addq_lo(s, tmp64, rs);
11027 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
11028 } else if (op & 0x40) {
11029 /* 64-bit accumulate. */
a7812ae4 11030 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 11031 }
a7812ae4 11032 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 11033 tcg_temp_free_i64(tmp64);
5fd46862 11034 }
2c0262af 11035 break;
9ee6e8bb
PB
11036 }
11037 break;
11038 case 6: case 7: case 14: case 15:
11039 /* Coprocessor. */
7517748e
PM
11040 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11041 /* We don't currently implement M profile FP support,
b1e5336a
PM
11042 * so this entire space should give a NOCP fault, with
11043 * the exception of the v8M VLLDM and VLSTM insns, which
11044 * must be NOPs in Secure state and UNDEF in Nonsecure state.
7517748e 11045 */
b1e5336a
PM
11046 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
11047 (insn & 0xffa00f00) == 0xec200a00) {
11048 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
11049 * - VLLDM, VLSTM
11050 * We choose to UNDEF if the RAZ bits are non-zero.
11051 */
11052 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
11053 goto illegal_op;
11054 }
11055 /* Just NOP since FP support is not implemented */
11056 break;
11057 }
11058 /* All other insns: NOCP */
7517748e
PM
11059 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
11060 default_exception_el(s));
11061 break;
11062 }
0052087e
RH
11063 if ((insn & 0xfe000a00) == 0xfc000800
11064 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11065 /* The Thumb2 and ARM encodings are identical. */
11066 if (disas_neon_insn_3same_ext(s, insn)) {
11067 goto illegal_op;
11068 }
11069 } else if ((insn & 0xff000a00) == 0xfe000800
11070 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11071 /* The Thumb2 and ARM encodings are identical. */
11072 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
11073 goto illegal_op;
11074 }
11075 } else if (((insn >> 24) & 3) == 3) {
9ee6e8bb 11076 /* Translate into the equivalent ARM encoding. */
f06053e3 11077 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 11078 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 11079 goto illegal_op;
7dcc1f89 11080 }
6a57f3eb 11081 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 11082 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
11083 goto illegal_op;
11084 }
9ee6e8bb
PB
11085 } else {
11086 if (insn & (1 << 28))
11087 goto illegal_op;
7dcc1f89 11088 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 11089 goto illegal_op;
7dcc1f89 11090 }
9ee6e8bb
PB
11091 }
11092 break;
11093 case 8: case 9: case 10: case 11:
11094 if (insn & (1 << 15)) {
11095 /* Branches, misc control. */
11096 if (insn & 0x5000) {
11097 /* Unconditional branch. */
11098 /* signextend(hw1[10:0]) -> offset[:12]. */
11099 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
11100 /* hw1[10:0] -> offset[11:1]. */
11101 offset |= (insn & 0x7ff) << 1;
11102 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
11103 offset[24:22] already have the same value because of the
11104 sign extension above. */
11105 offset ^= ((~insn) & (1 << 13)) << 10;
11106 offset ^= ((~insn) & (1 << 11)) << 11;
11107
9ee6e8bb
PB
11108 if (insn & (1 << 14)) {
11109 /* Branch and link. */
3174f8e9 11110 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 11111 }
3b46e624 11112
b0109805 11113 offset += s->pc;
9ee6e8bb
PB
11114 if (insn & (1 << 12)) {
11115 /* b/bl */
b0109805 11116 gen_jmp(s, offset);
9ee6e8bb
PB
11117 } else {
11118 /* blx */
b0109805 11119 offset &= ~(uint32_t)2;
be5e7a76 11120 /* thumb2 bx, no need to check */
b0109805 11121 gen_bx_im(s, offset);
2c0262af 11122 }
9ee6e8bb
PB
11123 } else if (((insn >> 23) & 7) == 7) {
11124 /* Misc control */
11125 if (insn & (1 << 13))
11126 goto illegal_op;
11127
11128 if (insn & (1 << 26)) {
001b3cab
PM
11129 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11130 goto illegal_op;
11131 }
37e6456e
PM
11132 if (!(insn & (1 << 20))) {
11133 /* Hypervisor call (v7) */
11134 int imm16 = extract32(insn, 16, 4) << 12
11135 | extract32(insn, 0, 12);
11136 ARCH(7);
11137 if (IS_USER(s)) {
11138 goto illegal_op;
11139 }
11140 gen_hvc(s, imm16);
11141 } else {
11142 /* Secure monitor call (v6+) */
11143 ARCH(6K);
11144 if (IS_USER(s)) {
11145 goto illegal_op;
11146 }
11147 gen_smc(s);
11148 }
2c0262af 11149 } else {
9ee6e8bb
PB
11150 op = (insn >> 20) & 7;
11151 switch (op) {
11152 case 0: /* msr cpsr. */
b53d8923 11153 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 11154 tmp = load_reg(s, rn);
b28b3377
PM
11155 /* the constant is the mask and SYSm fields */
11156 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 11157 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 11158 tcg_temp_free_i32(addr);
7d1b0095 11159 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11160 gen_lookup_tb(s);
11161 break;
11162 }
11163 /* fall through */
11164 case 1: /* msr spsr. */
b53d8923 11165 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11166 goto illegal_op;
b53d8923 11167 }
8bfd0550
PM
11168
11169 if (extract32(insn, 5, 1)) {
11170 /* MSR (banked) */
11171 int sysm = extract32(insn, 8, 4) |
11172 (extract32(insn, 4, 1) << 4);
11173 int r = op & 1;
11174
11175 gen_msr_banked(s, r, sysm, rm);
11176 break;
11177 }
11178
11179 /* MSR (for PSRs) */
2fbac54b
FN
11180 tmp = load_reg(s, rn);
11181 if (gen_set_psr(s,
7dcc1f89 11182 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 11183 op == 1, tmp))
9ee6e8bb
PB
11184 goto illegal_op;
11185 break;
11186 case 2: /* cps, nop-hint. */
11187 if (((insn >> 8) & 7) == 0) {
11188 gen_nop_hint(s, insn & 0xff);
11189 }
11190 /* Implemented as NOP in user mode. */
11191 if (IS_USER(s))
11192 break;
11193 offset = 0;
11194 imm = 0;
11195 if (insn & (1 << 10)) {
11196 if (insn & (1 << 7))
11197 offset |= CPSR_A;
11198 if (insn & (1 << 6))
11199 offset |= CPSR_I;
11200 if (insn & (1 << 5))
11201 offset |= CPSR_F;
11202 if (insn & (1 << 9))
11203 imm = CPSR_A | CPSR_I | CPSR_F;
11204 }
11205 if (insn & (1 << 8)) {
11206 offset |= 0x1f;
11207 imm |= (insn & 0x1f);
11208 }
11209 if (offset) {
2fbac54b 11210 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
11211 }
11212 break;
11213 case 3: /* Special control operations. */
14120108 11214 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
8297cb13 11215 !arm_dc_feature(s, ARM_FEATURE_M)) {
14120108
JS
11216 goto illegal_op;
11217 }
9ee6e8bb
PB
11218 op = (insn >> 4) & 0xf;
11219 switch (op) {
11220 case 2: /* clrex */
426f5abc 11221 gen_clrex(s);
9ee6e8bb
PB
11222 break;
11223 case 4: /* dsb */
11224 case 5: /* dmb */
61e4c432 11225 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 11226 break;
6df99dec
SS
11227 case 6: /* isb */
11228 /* We need to break the TB after this insn
11229 * to execute self-modifying code correctly
11230 * and also to take any pending interrupts
11231 * immediately.
11232 */
0b609cc1 11233 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 11234 break;
9ee6e8bb
PB
11235 default:
11236 goto illegal_op;
11237 }
11238 break;
11239 case 4: /* bxj */
9d7c59c8
PM
11240 /* Trivial implementation equivalent to bx.
11241 * This instruction doesn't exist at all for M-profile.
11242 */
11243 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11244 goto illegal_op;
11245 }
d9ba4830
PB
11246 tmp = load_reg(s, rn);
11247 gen_bx(s, tmp);
9ee6e8bb
PB
11248 break;
11249 case 5: /* Exception return. */
b8b45b68
RV
11250 if (IS_USER(s)) {
11251 goto illegal_op;
11252 }
11253 if (rn != 14 || rd != 15) {
11254 goto illegal_op;
11255 }
55c544ed
PM
11256 if (s->current_el == 2) {
11257 /* ERET from Hyp uses ELR_Hyp, not LR */
11258 if (insn & 0xff) {
11259 goto illegal_op;
11260 }
11261 tmp = load_cpu_field(elr_el[2]);
11262 } else {
11263 tmp = load_reg(s, rn);
11264 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
11265 }
b8b45b68
RV
11266 gen_exception_return(s, tmp);
11267 break;
8bfd0550 11268 case 6: /* MRS */
43ac6574
PM
11269 if (extract32(insn, 5, 1) &&
11270 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
11271 /* MRS (banked) */
11272 int sysm = extract32(insn, 16, 4) |
11273 (extract32(insn, 4, 1) << 4);
11274
11275 gen_mrs_banked(s, 0, sysm, rd);
11276 break;
11277 }
11278
3d54026f
PM
11279 if (extract32(insn, 16, 4) != 0xf) {
11280 goto illegal_op;
11281 }
11282 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
11283 extract32(insn, 0, 8) != 0) {
11284 goto illegal_op;
11285 }
11286
8bfd0550 11287 /* mrs cpsr */
7d1b0095 11288 tmp = tcg_temp_new_i32();
b53d8923 11289 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
11290 addr = tcg_const_i32(insn & 0xff);
11291 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 11292 tcg_temp_free_i32(addr);
9ee6e8bb 11293 } else {
9ef39277 11294 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 11295 }
8984bd2e 11296 store_reg(s, rd, tmp);
9ee6e8bb 11297 break;
8bfd0550 11298 case 7: /* MRS */
43ac6574
PM
11299 if (extract32(insn, 5, 1) &&
11300 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
11301 /* MRS (banked) */
11302 int sysm = extract32(insn, 16, 4) |
11303 (extract32(insn, 4, 1) << 4);
11304
11305 gen_mrs_banked(s, 1, sysm, rd);
11306 break;
11307 }
11308
11309 /* mrs spsr. */
9ee6e8bb 11310 /* Not accessible in user mode. */
b53d8923 11311 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11312 goto illegal_op;
b53d8923 11313 }
3d54026f
PM
11314
11315 if (extract32(insn, 16, 4) != 0xf ||
11316 extract32(insn, 0, 8) != 0) {
11317 goto illegal_op;
11318 }
11319
d9ba4830
PB
11320 tmp = load_cpu_field(spsr);
11321 store_reg(s, rd, tmp);
9ee6e8bb 11322 break;
2c0262af
FB
11323 }
11324 }
9ee6e8bb
PB
11325 } else {
11326 /* Conditional branch. */
11327 op = (insn >> 22) & 0xf;
11328 /* Generate a conditional jump to next instruction. */
c2d9644e 11329 arm_skip_unless(s, op);
9ee6e8bb
PB
11330
11331 /* offset[11:1] = insn[10:0] */
11332 offset = (insn & 0x7ff) << 1;
11333 /* offset[17:12] = insn[21:16]. */
11334 offset |= (insn & 0x003f0000) >> 4;
11335 /* offset[31:20] = insn[26]. */
11336 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
11337 /* offset[18] = insn[13]. */
11338 offset |= (insn & (1 << 13)) << 5;
11339 /* offset[19] = insn[11]. */
11340 offset |= (insn & (1 << 11)) << 8;
11341
11342 /* jump to the offset */
b0109805 11343 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
11344 }
11345 } else {
55203189
PM
11346 /*
11347 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
11348 * - Data-processing (modified immediate, plain binary immediate)
11349 */
9ee6e8bb 11350 if (insn & (1 << 25)) {
55203189
PM
11351 /*
11352 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
11353 * - Data-processing (plain binary immediate)
11354 */
9ee6e8bb
PB
11355 if (insn & (1 << 24)) {
11356 if (insn & (1 << 20))
11357 goto illegal_op;
11358 /* Bitfield/Saturate. */
11359 op = (insn >> 21) & 7;
11360 imm = insn & 0x1f;
11361 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 11362 if (rn == 15) {
7d1b0095 11363 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
11364 tcg_gen_movi_i32(tmp, 0);
11365 } else {
11366 tmp = load_reg(s, rn);
11367 }
9ee6e8bb
PB
11368 switch (op) {
11369 case 2: /* Signed bitfield extract. */
11370 imm++;
11371 if (shift + imm > 32)
11372 goto illegal_op;
59a71b4c
RH
11373 if (imm < 32) {
11374 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
11375 }
9ee6e8bb
PB
11376 break;
11377 case 6: /* Unsigned bitfield extract. */
11378 imm++;
11379 if (shift + imm > 32)
11380 goto illegal_op;
59a71b4c
RH
11381 if (imm < 32) {
11382 tcg_gen_extract_i32(tmp, tmp, shift, imm);
11383 }
9ee6e8bb
PB
11384 break;
11385 case 3: /* Bitfield insert/clear. */
11386 if (imm < shift)
11387 goto illegal_op;
11388 imm = imm + 1 - shift;
11389 if (imm != 32) {
6ddbc6e4 11390 tmp2 = load_reg(s, rd);
d593c48e 11391 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 11392 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11393 }
11394 break;
11395 case 7:
11396 goto illegal_op;
11397 default: /* Saturate. */
9ee6e8bb
PB
11398 if (shift) {
11399 if (op & 1)
6ddbc6e4 11400 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 11401 else
6ddbc6e4 11402 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 11403 }
6ddbc6e4 11404 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
11405 if (op & 4) {
11406 /* Unsigned. */
62b44f05
AR
11407 if ((op & 1) && shift == 0) {
11408 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11409 tcg_temp_free_i32(tmp);
11410 tcg_temp_free_i32(tmp2);
11411 goto illegal_op;
11412 }
9ef39277 11413 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 11414 } else {
9ef39277 11415 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 11416 }
2c0262af 11417 } else {
9ee6e8bb 11418 /* Signed. */
62b44f05
AR
11419 if ((op & 1) && shift == 0) {
11420 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11421 tcg_temp_free_i32(tmp);
11422 tcg_temp_free_i32(tmp2);
11423 goto illegal_op;
11424 }
9ef39277 11425 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 11426 } else {
9ef39277 11427 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 11428 }
2c0262af 11429 }
b75263d6 11430 tcg_temp_free_i32(tmp2);
9ee6e8bb 11431 break;
2c0262af 11432 }
6ddbc6e4 11433 store_reg(s, rd, tmp);
9ee6e8bb
PB
11434 } else {
11435 imm = ((insn & 0x04000000) >> 15)
11436 | ((insn & 0x7000) >> 4) | (insn & 0xff);
11437 if (insn & (1 << 22)) {
11438 /* 16-bit immediate. */
11439 imm |= (insn >> 4) & 0xf000;
11440 if (insn & (1 << 23)) {
11441 /* movt */
5e3f878a 11442 tmp = load_reg(s, rd);
86831435 11443 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 11444 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 11445 } else {
9ee6e8bb 11446 /* movw */
7d1b0095 11447 tmp = tcg_temp_new_i32();
5e3f878a 11448 tcg_gen_movi_i32(tmp, imm);
2c0262af 11449 }
55203189 11450 store_reg(s, rd, tmp);
2c0262af 11451 } else {
9ee6e8bb
PB
11452 /* Add/sub 12-bit immediate. */
11453 if (rn == 15) {
b0109805 11454 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 11455 if (insn & (1 << 23))
b0109805 11456 offset -= imm;
9ee6e8bb 11457 else
b0109805 11458 offset += imm;
7d1b0095 11459 tmp = tcg_temp_new_i32();
5e3f878a 11460 tcg_gen_movi_i32(tmp, offset);
55203189 11461 store_reg(s, rd, tmp);
2c0262af 11462 } else {
5e3f878a 11463 tmp = load_reg(s, rn);
9ee6e8bb 11464 if (insn & (1 << 23))
5e3f878a 11465 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 11466 else
5e3f878a 11467 tcg_gen_addi_i32(tmp, tmp, imm);
55203189
PM
11468 if (rn == 13 && rd == 13) {
11469 /* ADD SP, SP, imm or SUB SP, SP, imm */
11470 store_sp_checked(s, tmp);
11471 } else {
11472 store_reg(s, rd, tmp);
11473 }
2c0262af 11474 }
9ee6e8bb 11475 }
191abaa2 11476 }
9ee6e8bb 11477 } else {
55203189
PM
11478 /*
11479 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
11480 * - Data-processing (modified immediate)
11481 */
9ee6e8bb
PB
11482 int shifter_out = 0;
11483 /* modified 12-bit immediate. */
11484 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
11485 imm = (insn & 0xff);
11486 switch (shift) {
11487 case 0: /* XY */
11488 /* Nothing to do. */
11489 break;
11490 case 1: /* 00XY00XY */
11491 imm |= imm << 16;
11492 break;
11493 case 2: /* XY00XY00 */
11494 imm |= imm << 16;
11495 imm <<= 8;
11496 break;
11497 case 3: /* XYXYXYXY */
11498 imm |= imm << 16;
11499 imm |= imm << 8;
11500 break;
11501 default: /* Rotated constant. */
11502 shift = (shift << 1) | (imm >> 7);
11503 imm |= 0x80;
11504 imm = imm << (32 - shift);
11505 shifter_out = 1;
11506 break;
b5ff1b31 11507 }
7d1b0095 11508 tmp2 = tcg_temp_new_i32();
3174f8e9 11509 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 11510 rn = (insn >> 16) & 0xf;
3174f8e9 11511 if (rn == 15) {
7d1b0095 11512 tmp = tcg_temp_new_i32();
3174f8e9
FN
11513 tcg_gen_movi_i32(tmp, 0);
11514 } else {
11515 tmp = load_reg(s, rn);
11516 }
9ee6e8bb
PB
11517 op = (insn >> 21) & 0xf;
11518 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 11519 shifter_out, tmp, tmp2))
9ee6e8bb 11520 goto illegal_op;
7d1b0095 11521 tcg_temp_free_i32(tmp2);
9ee6e8bb 11522 rd = (insn >> 8) & 0xf;
55203189
PM
11523 if (rd == 13 && rn == 13
11524 && (op == 8 || op == 13)) {
11525 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
11526 store_sp_checked(s, tmp);
11527 } else if (rd != 15) {
3174f8e9
FN
11528 store_reg(s, rd, tmp);
11529 } else {
7d1b0095 11530 tcg_temp_free_i32(tmp);
2c0262af 11531 }
2c0262af 11532 }
9ee6e8bb
PB
11533 }
11534 break;
11535 case 12: /* Load/store single data item. */
11536 {
11537 int postinc = 0;
11538 int writeback = 0;
a99caa48 11539 int memidx;
9bb6558a
PM
11540 ISSInfo issinfo;
11541
9ee6e8bb 11542 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 11543 if (disas_neon_ls_insn(s, insn)) {
c1713132 11544 goto illegal_op;
7dcc1f89 11545 }
9ee6e8bb
PB
11546 break;
11547 }
a2fdc890
PM
11548 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
11549 if (rs == 15) {
11550 if (!(insn & (1 << 20))) {
11551 goto illegal_op;
11552 }
11553 if (op != 2) {
11554 /* Byte or halfword load space with dest == r15 : memory hints.
11555 * Catch them early so we don't emit pointless addressing code.
11556 * This space is a mix of:
11557 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
11558 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
11559 * cores)
11560 * unallocated hints, which must be treated as NOPs
11561 * UNPREDICTABLE space, which we NOP or UNDEF depending on
11562 * which is easiest for the decoding logic
11563 * Some space which must UNDEF
11564 */
11565 int op1 = (insn >> 23) & 3;
11566 int op2 = (insn >> 6) & 0x3f;
11567 if (op & 2) {
11568 goto illegal_op;
11569 }
11570 if (rn == 15) {
02afbf64
PM
11571 /* UNPREDICTABLE, unallocated hint or
11572 * PLD/PLDW/PLI (literal)
11573 */
2eea841c 11574 return;
a2fdc890
PM
11575 }
11576 if (op1 & 1) {
2eea841c 11577 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11578 }
11579 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 11580 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11581 }
11582 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 11583 goto illegal_op;
a2fdc890
PM
11584 }
11585 }
a99caa48 11586 memidx = get_mem_index(s);
9ee6e8bb 11587 if (rn == 15) {
7d1b0095 11588 addr = tcg_temp_new_i32();
9ee6e8bb
PB
11589 /* PC relative. */
11590 /* s->pc has already been incremented by 4. */
11591 imm = s->pc & 0xfffffffc;
11592 if (insn & (1 << 23))
11593 imm += insn & 0xfff;
11594 else
11595 imm -= insn & 0xfff;
b0109805 11596 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 11597 } else {
b0109805 11598 addr = load_reg(s, rn);
9ee6e8bb
PB
11599 if (insn & (1 << 23)) {
11600 /* Positive offset. */
11601 imm = insn & 0xfff;
b0109805 11602 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 11603 } else {
9ee6e8bb 11604 imm = insn & 0xff;
2a0308c5
PM
11605 switch ((insn >> 8) & 0xf) {
11606 case 0x0: /* Shifted Register. */
9ee6e8bb 11607 shift = (insn >> 4) & 0xf;
2a0308c5
PM
11608 if (shift > 3) {
11609 tcg_temp_free_i32(addr);
18c9b560 11610 goto illegal_op;
2a0308c5 11611 }
b26eefb6 11612 tmp = load_reg(s, rm);
9ee6e8bb 11613 if (shift)
b26eefb6 11614 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 11615 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11616 tcg_temp_free_i32(tmp);
9ee6e8bb 11617 break;
2a0308c5 11618 case 0xc: /* Negative offset. */
b0109805 11619 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 11620 break;
2a0308c5 11621 case 0xe: /* User privilege. */
b0109805 11622 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 11623 memidx = get_a32_user_mem_index(s);
9ee6e8bb 11624 break;
2a0308c5 11625 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
11626 imm = -imm;
11627 /* Fall through. */
2a0308c5 11628 case 0xb: /* Post-increment. */
9ee6e8bb
PB
11629 postinc = 1;
11630 writeback = 1;
11631 break;
2a0308c5 11632 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
11633 imm = -imm;
11634 /* Fall through. */
2a0308c5 11635 case 0xf: /* Pre-increment. */
9ee6e8bb
PB
11636 writeback = 1;
11637 break;
11638 default:
2a0308c5 11639 tcg_temp_free_i32(addr);
b7bcbe95 11640 goto illegal_op;
9ee6e8bb
PB
11641 }
11642 }
11643 }
9bb6558a
PM
11644
11645 issinfo = writeback ? ISSInvalid : rs;
11646
0bc003ba
PM
11647 if (s->v8m_stackcheck && rn == 13 && writeback) {
11648 /*
11649 * Stackcheck. Here we know 'addr' is the current SP;
11650 * if imm is +ve we're moving SP up, else down. It is
11651 * UNKNOWN whether the limit check triggers when SP starts
11652 * below the limit and ends up above it; we chose to do so.
11653 */
11654 if ((int32_t)imm < 0) {
11655 TCGv_i32 newsp = tcg_temp_new_i32();
11656
11657 tcg_gen_addi_i32(newsp, addr, imm);
11658 gen_helper_v8m_stackcheck(cpu_env, newsp);
11659 tcg_temp_free_i32(newsp);
11660 } else {
11661 gen_helper_v8m_stackcheck(cpu_env, addr);
11662 }
11663 }
11664
11665 if (writeback && !postinc) {
11666 tcg_gen_addi_i32(addr, addr, imm);
11667 }
11668
9ee6e8bb
PB
11669 if (insn & (1 << 20)) {
11670 /* Load. */
5a839c0d 11671 tmp = tcg_temp_new_i32();
a2fdc890 11672 switch (op) {
5a839c0d 11673 case 0:
9bb6558a 11674 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11675 break;
11676 case 4:
9bb6558a 11677 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11678 break;
11679 case 1:
9bb6558a 11680 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11681 break;
11682 case 5:
9bb6558a 11683 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11684 break;
11685 case 2:
9bb6558a 11686 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11687 break;
2a0308c5 11688 default:
5a839c0d 11689 tcg_temp_free_i32(tmp);
2a0308c5
PM
11690 tcg_temp_free_i32(addr);
11691 goto illegal_op;
a2fdc890
PM
11692 }
11693 if (rs == 15) {
3bb8a96f 11694 gen_bx_excret(s, tmp);
9ee6e8bb 11695 } else {
a2fdc890 11696 store_reg(s, rs, tmp);
9ee6e8bb
PB
11697 }
11698 } else {
11699 /* Store. */
b0109805 11700 tmp = load_reg(s, rs);
9ee6e8bb 11701 switch (op) {
5a839c0d 11702 case 0:
9bb6558a 11703 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11704 break;
11705 case 1:
9bb6558a 11706 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11707 break;
11708 case 2:
9bb6558a 11709 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11710 break;
2a0308c5 11711 default:
5a839c0d 11712 tcg_temp_free_i32(tmp);
2a0308c5
PM
11713 tcg_temp_free_i32(addr);
11714 goto illegal_op;
b7bcbe95 11715 }
5a839c0d 11716 tcg_temp_free_i32(tmp);
2c0262af 11717 }
9ee6e8bb 11718 if (postinc)
b0109805
PB
11719 tcg_gen_addi_i32(addr, addr, imm);
11720 if (writeback) {
11721 store_reg(s, rn, addr);
11722 } else {
7d1b0095 11723 tcg_temp_free_i32(addr);
b0109805 11724 }
9ee6e8bb
PB
11725 }
11726 break;
11727 default:
11728 goto illegal_op;
2c0262af 11729 }
2eea841c 11730 return;
9ee6e8bb 11731illegal_op:
2eea841c
PM
11732 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11733 default_exception_el(s));
2c0262af
FB
11734}
11735
296e5a0a 11736static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 11737{
296e5a0a 11738 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
11739 int32_t offset;
11740 int i;
39d5492a
PM
11741 TCGv_i32 tmp;
11742 TCGv_i32 tmp2;
11743 TCGv_i32 addr;
99c475ab 11744
99c475ab
FB
11745 switch (insn >> 12) {
11746 case 0: case 1:
396e467c 11747
99c475ab
FB
11748 rd = insn & 7;
11749 op = (insn >> 11) & 3;
11750 if (op == 3) {
a2d12f0f
PM
11751 /*
11752 * 0b0001_1xxx_xxxx_xxxx
11753 * - Add, subtract (three low registers)
11754 * - Add, subtract (two low registers and immediate)
11755 */
99c475ab 11756 rn = (insn >> 3) & 7;
396e467c 11757 tmp = load_reg(s, rn);
99c475ab
FB
11758 if (insn & (1 << 10)) {
11759 /* immediate */
7d1b0095 11760 tmp2 = tcg_temp_new_i32();
396e467c 11761 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11762 } else {
11763 /* reg */
11764 rm = (insn >> 6) & 7;
396e467c 11765 tmp2 = load_reg(s, rm);
99c475ab 11766 }
9ee6e8bb
PB
11767 if (insn & (1 << 9)) {
11768 if (s->condexec_mask)
396e467c 11769 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11770 else
72485ec4 11771 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11772 } else {
11773 if (s->condexec_mask)
396e467c 11774 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11775 else
72485ec4 11776 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11777 }
7d1b0095 11778 tcg_temp_free_i32(tmp2);
396e467c 11779 store_reg(s, rd, tmp);
99c475ab
FB
11780 } else {
11781 /* shift immediate */
11782 rm = (insn >> 3) & 7;
11783 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11784 tmp = load_reg(s, rm);
11785 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11786 if (!s->condexec_mask)
11787 gen_logic_CC(tmp);
11788 store_reg(s, rd, tmp);
99c475ab
FB
11789 }
11790 break;
11791 case 2: case 3:
a2d12f0f
PM
11792 /*
11793 * 0b001x_xxxx_xxxx_xxxx
11794 * - Add, subtract, compare, move (one low register and immediate)
11795 */
99c475ab
FB
11796 op = (insn >> 11) & 3;
11797 rd = (insn >> 8) & 0x7;
396e467c 11798 if (op == 0) { /* mov */
7d1b0095 11799 tmp = tcg_temp_new_i32();
396e467c 11800 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11801 if (!s->condexec_mask)
396e467c
FN
11802 gen_logic_CC(tmp);
11803 store_reg(s, rd, tmp);
11804 } else {
11805 tmp = load_reg(s, rd);
7d1b0095 11806 tmp2 = tcg_temp_new_i32();
396e467c
FN
11807 tcg_gen_movi_i32(tmp2, insn & 0xff);
11808 switch (op) {
11809 case 1: /* cmp */
72485ec4 11810 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11811 tcg_temp_free_i32(tmp);
11812 tcg_temp_free_i32(tmp2);
396e467c
FN
11813 break;
11814 case 2: /* add */
11815 if (s->condexec_mask)
11816 tcg_gen_add_i32(tmp, tmp, tmp2);
11817 else
72485ec4 11818 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11819 tcg_temp_free_i32(tmp2);
396e467c
FN
11820 store_reg(s, rd, tmp);
11821 break;
11822 case 3: /* sub */
11823 if (s->condexec_mask)
11824 tcg_gen_sub_i32(tmp, tmp, tmp2);
11825 else
72485ec4 11826 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11827 tcg_temp_free_i32(tmp2);
396e467c
FN
11828 store_reg(s, rd, tmp);
11829 break;
11830 }
99c475ab 11831 }
99c475ab
FB
11832 break;
11833 case 4:
11834 if (insn & (1 << 11)) {
11835 rd = (insn >> 8) & 7;
5899f386
FB
11836 /* load pc-relative. Bit 1 of PC is ignored. */
11837 val = s->pc + 2 + ((insn & 0xff) * 4);
11838 val &= ~(uint32_t)2;
7d1b0095 11839 addr = tcg_temp_new_i32();
b0109805 11840 tcg_gen_movi_i32(addr, val);
c40c8556 11841 tmp = tcg_temp_new_i32();
9bb6558a
PM
11842 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11843 rd | ISSIs16Bit);
7d1b0095 11844 tcg_temp_free_i32(addr);
b0109805 11845 store_reg(s, rd, tmp);
99c475ab
FB
11846 break;
11847 }
11848 if (insn & (1 << 10)) {
ebfe27c5
PM
11849 /* 0b0100_01xx_xxxx_xxxx
11850 * - data processing extended, branch and exchange
11851 */
99c475ab
FB
11852 rd = (insn & 7) | ((insn >> 4) & 8);
11853 rm = (insn >> 3) & 0xf;
11854 op = (insn >> 8) & 3;
11855 switch (op) {
11856 case 0: /* add */
396e467c
FN
11857 tmp = load_reg(s, rd);
11858 tmp2 = load_reg(s, rm);
11859 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11860 tcg_temp_free_i32(tmp2);
55203189
PM
11861 if (rd == 13) {
11862 /* ADD SP, SP, reg */
11863 store_sp_checked(s, tmp);
11864 } else {
11865 store_reg(s, rd, tmp);
11866 }
99c475ab
FB
11867 break;
11868 case 1: /* cmp */
396e467c
FN
11869 tmp = load_reg(s, rd);
11870 tmp2 = load_reg(s, rm);
72485ec4 11871 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11872 tcg_temp_free_i32(tmp2);
11873 tcg_temp_free_i32(tmp);
99c475ab
FB
11874 break;
11875 case 2: /* mov/cpy */
396e467c 11876 tmp = load_reg(s, rm);
55203189
PM
11877 if (rd == 13) {
11878 /* MOV SP, reg */
11879 store_sp_checked(s, tmp);
11880 } else {
11881 store_reg(s, rd, tmp);
11882 }
99c475ab 11883 break;
ebfe27c5
PM
11884 case 3:
11885 {
11886 /* 0b0100_0111_xxxx_xxxx
11887 * - branch [and link] exchange thumb register
11888 */
11889 bool link = insn & (1 << 7);
11890
fb602cb7 11891 if (insn & 3) {
ebfe27c5
PM
11892 goto undef;
11893 }
11894 if (link) {
be5e7a76 11895 ARCH(5);
ebfe27c5 11896 }
fb602cb7
PM
11897 if ((insn & 4)) {
11898 /* BXNS/BLXNS: only exists for v8M with the
11899 * security extensions, and always UNDEF if NonSecure.
11900 * We don't implement these in the user-only mode
11901 * either (in theory you can use them from Secure User
11902 * mode but they are too tied in to system emulation.)
11903 */
11904 if (!s->v8m_secure || IS_USER_ONLY) {
11905 goto undef;
11906 }
11907 if (link) {
3e3fa230 11908 gen_blxns(s, rm);
fb602cb7
PM
11909 } else {
11910 gen_bxns(s, rm);
11911 }
11912 break;
11913 }
11914 /* BLX/BX */
ebfe27c5
PM
11915 tmp = load_reg(s, rm);
11916 if (link) {
99c475ab 11917 val = (uint32_t)s->pc | 1;
7d1b0095 11918 tmp2 = tcg_temp_new_i32();
b0109805
PB
11919 tcg_gen_movi_i32(tmp2, val);
11920 store_reg(s, 14, tmp2);
3bb8a96f
PM
11921 gen_bx(s, tmp);
11922 } else {
11923 /* Only BX works as exception-return, not BLX */
11924 gen_bx_excret(s, tmp);
99c475ab 11925 }
99c475ab
FB
11926 break;
11927 }
ebfe27c5 11928 }
99c475ab
FB
11929 break;
11930 }
11931
a2d12f0f
PM
11932 /*
11933 * 0b0100_00xx_xxxx_xxxx
11934 * - Data-processing (two low registers)
11935 */
99c475ab
FB
11936 rd = insn & 7;
11937 rm = (insn >> 3) & 7;
11938 op = (insn >> 6) & 0xf;
11939 if (op == 2 || op == 3 || op == 4 || op == 7) {
11940 /* the shift/rotate ops want the operands backwards */
11941 val = rm;
11942 rm = rd;
11943 rd = val;
11944 val = 1;
11945 } else {
11946 val = 0;
11947 }
11948
396e467c 11949 if (op == 9) { /* neg */
7d1b0095 11950 tmp = tcg_temp_new_i32();
396e467c
FN
11951 tcg_gen_movi_i32(tmp, 0);
11952 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11953 tmp = load_reg(s, rd);
11954 } else {
f764718d 11955 tmp = NULL;
396e467c 11956 }
99c475ab 11957
396e467c 11958 tmp2 = load_reg(s, rm);
5899f386 11959 switch (op) {
99c475ab 11960 case 0x0: /* and */
396e467c 11961 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11962 if (!s->condexec_mask)
396e467c 11963 gen_logic_CC(tmp);
99c475ab
FB
11964 break;
11965 case 0x1: /* eor */
396e467c 11966 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11967 if (!s->condexec_mask)
396e467c 11968 gen_logic_CC(tmp);
99c475ab
FB
11969 break;
11970 case 0x2: /* lsl */
9ee6e8bb 11971 if (s->condexec_mask) {
365af80e 11972 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11973 } else {
9ef39277 11974 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11975 gen_logic_CC(tmp2);
9ee6e8bb 11976 }
99c475ab
FB
11977 break;
11978 case 0x3: /* lsr */
9ee6e8bb 11979 if (s->condexec_mask) {
365af80e 11980 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11981 } else {
9ef39277 11982 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11983 gen_logic_CC(tmp2);
9ee6e8bb 11984 }
99c475ab
FB
11985 break;
11986 case 0x4: /* asr */
9ee6e8bb 11987 if (s->condexec_mask) {
365af80e 11988 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11989 } else {
9ef39277 11990 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11991 gen_logic_CC(tmp2);
9ee6e8bb 11992 }
99c475ab
FB
11993 break;
11994 case 0x5: /* adc */
49b4c31e 11995 if (s->condexec_mask) {
396e467c 11996 gen_adc(tmp, tmp2);
49b4c31e
RH
11997 } else {
11998 gen_adc_CC(tmp, tmp, tmp2);
11999 }
99c475ab
FB
12000 break;
12001 case 0x6: /* sbc */
2de68a49 12002 if (s->condexec_mask) {
396e467c 12003 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
12004 } else {
12005 gen_sbc_CC(tmp, tmp, tmp2);
12006 }
99c475ab
FB
12007 break;
12008 case 0x7: /* ror */
9ee6e8bb 12009 if (s->condexec_mask) {
f669df27
AJ
12010 tcg_gen_andi_i32(tmp, tmp, 0x1f);
12011 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 12012 } else {
9ef39277 12013 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 12014 gen_logic_CC(tmp2);
9ee6e8bb 12015 }
99c475ab
FB
12016 break;
12017 case 0x8: /* tst */
396e467c
FN
12018 tcg_gen_and_i32(tmp, tmp, tmp2);
12019 gen_logic_CC(tmp);
99c475ab 12020 rd = 16;
5899f386 12021 break;
99c475ab 12022 case 0x9: /* neg */
9ee6e8bb 12023 if (s->condexec_mask)
396e467c 12024 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 12025 else
72485ec4 12026 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
12027 break;
12028 case 0xa: /* cmp */
72485ec4 12029 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
12030 rd = 16;
12031 break;
12032 case 0xb: /* cmn */
72485ec4 12033 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
12034 rd = 16;
12035 break;
12036 case 0xc: /* orr */
396e467c 12037 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 12038 if (!s->condexec_mask)
396e467c 12039 gen_logic_CC(tmp);
99c475ab
FB
12040 break;
12041 case 0xd: /* mul */
7b2919a0 12042 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 12043 if (!s->condexec_mask)
396e467c 12044 gen_logic_CC(tmp);
99c475ab
FB
12045 break;
12046 case 0xe: /* bic */
f669df27 12047 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 12048 if (!s->condexec_mask)
396e467c 12049 gen_logic_CC(tmp);
99c475ab
FB
12050 break;
12051 case 0xf: /* mvn */
396e467c 12052 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 12053 if (!s->condexec_mask)
396e467c 12054 gen_logic_CC(tmp2);
99c475ab 12055 val = 1;
5899f386 12056 rm = rd;
99c475ab
FB
12057 break;
12058 }
12059 if (rd != 16) {
396e467c
FN
12060 if (val) {
12061 store_reg(s, rm, tmp2);
12062 if (op != 0xf)
7d1b0095 12063 tcg_temp_free_i32(tmp);
396e467c
FN
12064 } else {
12065 store_reg(s, rd, tmp);
7d1b0095 12066 tcg_temp_free_i32(tmp2);
396e467c
FN
12067 }
12068 } else {
7d1b0095
PM
12069 tcg_temp_free_i32(tmp);
12070 tcg_temp_free_i32(tmp2);
99c475ab
FB
12071 }
12072 break;
12073
12074 case 5:
12075 /* load/store register offset. */
12076 rd = insn & 7;
12077 rn = (insn >> 3) & 7;
12078 rm = (insn >> 6) & 7;
12079 op = (insn >> 9) & 7;
b0109805 12080 addr = load_reg(s, rn);
b26eefb6 12081 tmp = load_reg(s, rm);
b0109805 12082 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 12083 tcg_temp_free_i32(tmp);
99c475ab 12084
c40c8556 12085 if (op < 3) { /* store */
b0109805 12086 tmp = load_reg(s, rd);
c40c8556
PM
12087 } else {
12088 tmp = tcg_temp_new_i32();
12089 }
99c475ab
FB
12090
12091 switch (op) {
12092 case 0: /* str */
9bb6558a 12093 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12094 break;
12095 case 1: /* strh */
9bb6558a 12096 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12097 break;
12098 case 2: /* strb */
9bb6558a 12099 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12100 break;
12101 case 3: /* ldrsb */
9bb6558a 12102 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12103 break;
12104 case 4: /* ldr */
9bb6558a 12105 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12106 break;
12107 case 5: /* ldrh */
9bb6558a 12108 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12109 break;
12110 case 6: /* ldrb */
9bb6558a 12111 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12112 break;
12113 case 7: /* ldrsh */
9bb6558a 12114 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12115 break;
12116 }
c40c8556 12117 if (op >= 3) { /* load */
b0109805 12118 store_reg(s, rd, tmp);
c40c8556
PM
12119 } else {
12120 tcg_temp_free_i32(tmp);
12121 }
7d1b0095 12122 tcg_temp_free_i32(addr);
99c475ab
FB
12123 break;
12124
12125 case 6:
12126 /* load/store word immediate offset */
12127 rd = insn & 7;
12128 rn = (insn >> 3) & 7;
b0109805 12129 addr = load_reg(s, rn);
99c475ab 12130 val = (insn >> 4) & 0x7c;
b0109805 12131 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12132
12133 if (insn & (1 << 11)) {
12134 /* load */
c40c8556 12135 tmp = tcg_temp_new_i32();
12dcc321 12136 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 12137 store_reg(s, rd, tmp);
99c475ab
FB
12138 } else {
12139 /* store */
b0109805 12140 tmp = load_reg(s, rd);
12dcc321 12141 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12142 tcg_temp_free_i32(tmp);
99c475ab 12143 }
7d1b0095 12144 tcg_temp_free_i32(addr);
99c475ab
FB
12145 break;
12146
12147 case 7:
12148 /* load/store byte immediate offset */
12149 rd = insn & 7;
12150 rn = (insn >> 3) & 7;
b0109805 12151 addr = load_reg(s, rn);
99c475ab 12152 val = (insn >> 6) & 0x1f;
b0109805 12153 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12154
12155 if (insn & (1 << 11)) {
12156 /* load */
c40c8556 12157 tmp = tcg_temp_new_i32();
9bb6558a 12158 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12159 store_reg(s, rd, tmp);
99c475ab
FB
12160 } else {
12161 /* store */
b0109805 12162 tmp = load_reg(s, rd);
9bb6558a 12163 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12164 tcg_temp_free_i32(tmp);
99c475ab 12165 }
7d1b0095 12166 tcg_temp_free_i32(addr);
99c475ab
FB
12167 break;
12168
12169 case 8:
12170 /* load/store halfword immediate offset */
12171 rd = insn & 7;
12172 rn = (insn >> 3) & 7;
b0109805 12173 addr = load_reg(s, rn);
99c475ab 12174 val = (insn >> 5) & 0x3e;
b0109805 12175 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12176
12177 if (insn & (1 << 11)) {
12178 /* load */
c40c8556 12179 tmp = tcg_temp_new_i32();
9bb6558a 12180 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12181 store_reg(s, rd, tmp);
99c475ab
FB
12182 } else {
12183 /* store */
b0109805 12184 tmp = load_reg(s, rd);
9bb6558a 12185 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12186 tcg_temp_free_i32(tmp);
99c475ab 12187 }
7d1b0095 12188 tcg_temp_free_i32(addr);
99c475ab
FB
12189 break;
12190
12191 case 9:
12192 /* load/store from stack */
12193 rd = (insn >> 8) & 7;
b0109805 12194 addr = load_reg(s, 13);
99c475ab 12195 val = (insn & 0xff) * 4;
b0109805 12196 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12197
12198 if (insn & (1 << 11)) {
12199 /* load */
c40c8556 12200 tmp = tcg_temp_new_i32();
9bb6558a 12201 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12202 store_reg(s, rd, tmp);
99c475ab
FB
12203 } else {
12204 /* store */
b0109805 12205 tmp = load_reg(s, rd);
9bb6558a 12206 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12207 tcg_temp_free_i32(tmp);
99c475ab 12208 }
7d1b0095 12209 tcg_temp_free_i32(addr);
99c475ab
FB
12210 break;
12211
12212 case 10:
55203189
PM
12213 /*
12214 * 0b1010_xxxx_xxxx_xxxx
12215 * - Add PC/SP (immediate)
12216 */
99c475ab 12217 rd = (insn >> 8) & 7;
5899f386
FB
12218 if (insn & (1 << 11)) {
12219 /* SP */
5e3f878a 12220 tmp = load_reg(s, 13);
5899f386
FB
12221 } else {
12222 /* PC. bit 1 is ignored. */
7d1b0095 12223 tmp = tcg_temp_new_i32();
5e3f878a 12224 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 12225 }
99c475ab 12226 val = (insn & 0xff) * 4;
5e3f878a
PB
12227 tcg_gen_addi_i32(tmp, tmp, val);
12228 store_reg(s, rd, tmp);
99c475ab
FB
12229 break;
12230
12231 case 11:
12232 /* misc */
12233 op = (insn >> 8) & 0xf;
12234 switch (op) {
12235 case 0:
55203189
PM
12236 /*
12237 * 0b1011_0000_xxxx_xxxx
12238 * - ADD (SP plus immediate)
12239 * - SUB (SP minus immediate)
12240 */
b26eefb6 12241 tmp = load_reg(s, 13);
99c475ab
FB
12242 val = (insn & 0x7f) * 4;
12243 if (insn & (1 << 7))
6a0d8a1d 12244 val = -(int32_t)val;
b26eefb6 12245 tcg_gen_addi_i32(tmp, tmp, val);
55203189 12246 store_sp_checked(s, tmp);
99c475ab
FB
12247 break;
12248
9ee6e8bb
PB
12249 case 2: /* sign/zero extend. */
12250 ARCH(6);
12251 rd = insn & 7;
12252 rm = (insn >> 3) & 7;
b0109805 12253 tmp = load_reg(s, rm);
9ee6e8bb 12254 switch ((insn >> 6) & 3) {
b0109805
PB
12255 case 0: gen_sxth(tmp); break;
12256 case 1: gen_sxtb(tmp); break;
12257 case 2: gen_uxth(tmp); break;
12258 case 3: gen_uxtb(tmp); break;
9ee6e8bb 12259 }
b0109805 12260 store_reg(s, rd, tmp);
9ee6e8bb 12261 break;
99c475ab 12262 case 4: case 5: case 0xc: case 0xd:
aa369e5c
PM
12263 /*
12264 * 0b1011_x10x_xxxx_xxxx
12265 * - push/pop
12266 */
b0109805 12267 addr = load_reg(s, 13);
5899f386
FB
12268 if (insn & (1 << 8))
12269 offset = 4;
99c475ab 12270 else
5899f386
FB
12271 offset = 0;
12272 for (i = 0; i < 8; i++) {
12273 if (insn & (1 << i))
12274 offset += 4;
12275 }
12276 if ((insn & (1 << 11)) == 0) {
b0109805 12277 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 12278 }
aa369e5c
PM
12279
12280 if (s->v8m_stackcheck) {
12281 /*
12282 * Here 'addr' is the lower of "old SP" and "new SP";
12283 * if this is a pop that starts below the limit and ends
12284 * above it, it is UNKNOWN whether the limit check triggers;
12285 * we choose to trigger.
12286 */
12287 gen_helper_v8m_stackcheck(cpu_env, addr);
12288 }
12289
99c475ab
FB
12290 for (i = 0; i < 8; i++) {
12291 if (insn & (1 << i)) {
12292 if (insn & (1 << 11)) {
12293 /* pop */
c40c8556 12294 tmp = tcg_temp_new_i32();
12dcc321 12295 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 12296 store_reg(s, i, tmp);
99c475ab
FB
12297 } else {
12298 /* push */
b0109805 12299 tmp = load_reg(s, i);
12dcc321 12300 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12301 tcg_temp_free_i32(tmp);
99c475ab 12302 }
5899f386 12303 /* advance to the next address. */
b0109805 12304 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
12305 }
12306 }
f764718d 12307 tmp = NULL;
99c475ab
FB
12308 if (insn & (1 << 8)) {
12309 if (insn & (1 << 11)) {
12310 /* pop pc */
c40c8556 12311 tmp = tcg_temp_new_i32();
12dcc321 12312 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
12313 /* don't set the pc until the rest of the instruction
12314 has completed */
12315 } else {
12316 /* push lr */
b0109805 12317 tmp = load_reg(s, 14);
12dcc321 12318 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12319 tcg_temp_free_i32(tmp);
99c475ab 12320 }
b0109805 12321 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 12322 }
5899f386 12323 if ((insn & (1 << 11)) == 0) {
b0109805 12324 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 12325 }
99c475ab 12326 /* write back the new stack pointer */
b0109805 12327 store_reg(s, 13, addr);
99c475ab 12328 /* set the new PC value */
be5e7a76 12329 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 12330 store_reg_from_load(s, 15, tmp);
be5e7a76 12331 }
99c475ab
FB
12332 break;
12333
9ee6e8bb
PB
12334 case 1: case 3: case 9: case 11: /* czb */
12335 rm = insn & 7;
d9ba4830 12336 tmp = load_reg(s, rm);
c2d9644e 12337 arm_gen_condlabel(s);
9ee6e8bb 12338 if (insn & (1 << 11))
cb63669a 12339 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 12340 else
cb63669a 12341 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 12342 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
12343 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
12344 val = (uint32_t)s->pc + 2;
12345 val += offset;
12346 gen_jmp(s, val);
12347 break;
12348
12349 case 15: /* IT, nop-hint. */
12350 if ((insn & 0xf) == 0) {
12351 gen_nop_hint(s, (insn >> 4) & 0xf);
12352 break;
12353 }
12354 /* If Then. */
12355 s->condexec_cond = (insn >> 4) & 0xe;
12356 s->condexec_mask = insn & 0x1f;
12357 /* No actual code generated for this insn, just setup state. */
12358 break;
12359
06c949e6 12360 case 0xe: /* bkpt */
d4a2dc67
PM
12361 {
12362 int imm8 = extract32(insn, 0, 8);
be5e7a76 12363 ARCH(5);
c900a2e6 12364 gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
06c949e6 12365 break;
d4a2dc67 12366 }
06c949e6 12367
19a6e31c
PM
12368 case 0xa: /* rev, and hlt */
12369 {
12370 int op1 = extract32(insn, 6, 2);
12371
12372 if (op1 == 2) {
12373 /* HLT */
12374 int imm6 = extract32(insn, 0, 6);
12375
12376 gen_hlt(s, imm6);
12377 break;
12378 }
12379
12380 /* Otherwise this is rev */
9ee6e8bb
PB
12381 ARCH(6);
12382 rn = (insn >> 3) & 0x7;
12383 rd = insn & 0x7;
b0109805 12384 tmp = load_reg(s, rn);
19a6e31c 12385 switch (op1) {
66896cb8 12386 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
12387 case 1: gen_rev16(tmp); break;
12388 case 3: gen_revsh(tmp); break;
19a6e31c
PM
12389 default:
12390 g_assert_not_reached();
9ee6e8bb 12391 }
b0109805 12392 store_reg(s, rd, tmp);
9ee6e8bb 12393 break;
19a6e31c 12394 }
9ee6e8bb 12395
d9e028c1
PM
12396 case 6:
12397 switch ((insn >> 5) & 7) {
12398 case 2:
12399 /* setend */
12400 ARCH(6);
9886ecdf
PB
12401 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
12402 gen_helper_setend(cpu_env);
dcba3a8d 12403 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 12404 }
9ee6e8bb 12405 break;
d9e028c1
PM
12406 case 3:
12407 /* cps */
12408 ARCH(6);
12409 if (IS_USER(s)) {
12410 break;
8984bd2e 12411 }
b53d8923 12412 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
12413 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
12414 /* FAULTMASK */
12415 if (insn & 1) {
12416 addr = tcg_const_i32(19);
12417 gen_helper_v7m_msr(cpu_env, addr, tmp);
12418 tcg_temp_free_i32(addr);
12419 }
12420 /* PRIMASK */
12421 if (insn & 2) {
12422 addr = tcg_const_i32(16);
12423 gen_helper_v7m_msr(cpu_env, addr, tmp);
12424 tcg_temp_free_i32(addr);
12425 }
12426 tcg_temp_free_i32(tmp);
12427 gen_lookup_tb(s);
12428 } else {
12429 if (insn & (1 << 4)) {
12430 shift = CPSR_A | CPSR_I | CPSR_F;
12431 } else {
12432 shift = 0;
12433 }
12434 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 12435 }
d9e028c1
PM
12436 break;
12437 default:
12438 goto undef;
9ee6e8bb
PB
12439 }
12440 break;
12441
99c475ab
FB
12442 default:
12443 goto undef;
12444 }
12445 break;
12446
12447 case 12:
a7d3970d 12448 {
99c475ab 12449 /* load/store multiple */
f764718d 12450 TCGv_i32 loaded_var = NULL;
99c475ab 12451 rn = (insn >> 8) & 0x7;
b0109805 12452 addr = load_reg(s, rn);
99c475ab
FB
12453 for (i = 0; i < 8; i++) {
12454 if (insn & (1 << i)) {
99c475ab
FB
12455 if (insn & (1 << 11)) {
12456 /* load */
c40c8556 12457 tmp = tcg_temp_new_i32();
12dcc321 12458 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
12459 if (i == rn) {
12460 loaded_var = tmp;
12461 } else {
12462 store_reg(s, i, tmp);
12463 }
99c475ab
FB
12464 } else {
12465 /* store */
b0109805 12466 tmp = load_reg(s, i);
12dcc321 12467 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12468 tcg_temp_free_i32(tmp);
99c475ab 12469 }
5899f386 12470 /* advance to the next address */
b0109805 12471 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
12472 }
12473 }
b0109805 12474 if ((insn & (1 << rn)) == 0) {
a7d3970d 12475 /* base reg not in list: base register writeback */
b0109805
PB
12476 store_reg(s, rn, addr);
12477 } else {
a7d3970d
PM
12478 /* base reg in list: if load, complete it now */
12479 if (insn & (1 << 11)) {
12480 store_reg(s, rn, loaded_var);
12481 }
7d1b0095 12482 tcg_temp_free_i32(addr);
b0109805 12483 }
99c475ab 12484 break;
a7d3970d 12485 }
99c475ab
FB
12486 case 13:
12487 /* conditional branch or swi */
12488 cond = (insn >> 8) & 0xf;
12489 if (cond == 0xe)
12490 goto undef;
12491
12492 if (cond == 0xf) {
12493 /* swi */
eaed129d 12494 gen_set_pc_im(s, s->pc);
d4a2dc67 12495 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 12496 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
12497 break;
12498 }
12499 /* generate a conditional jump to next instruction */
c2d9644e 12500 arm_skip_unless(s, cond);
99c475ab
FB
12501
12502 /* jump to the offset */
5899f386 12503 val = (uint32_t)s->pc + 2;
99c475ab 12504 offset = ((int32_t)insn << 24) >> 24;
5899f386 12505 val += offset << 1;
8aaca4c0 12506 gen_jmp(s, val);
99c475ab
FB
12507 break;
12508
12509 case 14:
358bf29e 12510 if (insn & (1 << 11)) {
296e5a0a
PM
12511 /* thumb_insn_is_16bit() ensures we can't get here for
12512 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
12513 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
12514 */
12515 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12516 ARCH(5);
12517 offset = ((insn & 0x7ff) << 1);
12518 tmp = load_reg(s, 14);
12519 tcg_gen_addi_i32(tmp, tmp, offset);
12520 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
12521
12522 tmp2 = tcg_temp_new_i32();
12523 tcg_gen_movi_i32(tmp2, s->pc | 1);
12524 store_reg(s, 14, tmp2);
12525 gen_bx(s, tmp);
358bf29e
PB
12526 break;
12527 }
9ee6e8bb 12528 /* unconditional branch */
99c475ab
FB
12529 val = (uint32_t)s->pc;
12530 offset = ((int32_t)insn << 21) >> 21;
12531 val += (offset << 1) + 2;
8aaca4c0 12532 gen_jmp(s, val);
99c475ab
FB
12533 break;
12534
12535 case 15:
296e5a0a
PM
12536 /* thumb_insn_is_16bit() ensures we can't get here for
12537 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
12538 */
12539 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12540
12541 if (insn & (1 << 11)) {
12542 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
12543 offset = ((insn & 0x7ff) << 1) | 1;
12544 tmp = load_reg(s, 14);
12545 tcg_gen_addi_i32(tmp, tmp, offset);
12546
12547 tmp2 = tcg_temp_new_i32();
12548 tcg_gen_movi_i32(tmp2, s->pc | 1);
12549 store_reg(s, 14, tmp2);
12550 gen_bx(s, tmp);
12551 } else {
12552 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
12553 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
12554
12555 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
12556 }
9ee6e8bb 12557 break;
99c475ab
FB
12558 }
12559 return;
9ee6e8bb 12560illegal_op:
99c475ab 12561undef:
73710361
GB
12562 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
12563 default_exception_el(s));
99c475ab
FB
12564}
12565
541ebcd4
PM
12566static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
12567{
12568 /* Return true if the insn at dc->pc might cross a page boundary.
12569 * (False positives are OK, false negatives are not.)
5b8d7289
PM
12570 * We know this is a Thumb insn, and our caller ensures we are
12571 * only called if dc->pc is less than 4 bytes from the page
12572 * boundary, so we cross the page if the first 16 bits indicate
12573 * that this is a 32 bit insn.
541ebcd4 12574 */
5b8d7289 12575 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4 12576
5b8d7289 12577 return !thumb_insn_is_16bit(s, insn);
541ebcd4
PM
12578}
12579
/*
 * TranslatorOps "init_disas_context" hook for AArch32: populate the
 * DisasContext from the CPU state and the TB flags before translation
 * of this translation block begins.  Also bounds base.max_insns and
 * allocates the global TCG temporaries used by the VFP/Neon decoders.
 */
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);

    dc->isar = &cpu->isar;
    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    /* This is the 32-bit translator; the AArch64 one sets this to 1. */
    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    /* Unpack the per-TB state that was packed into tb->flags. */
    dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
    dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->v8m_stackcheck = ARM_TBFLAG_STACKCHECK(dc->base.tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1.  */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }

    /* Allocate the fixed temporaries shared by the VFP/Neon decoders. */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
}
12662
b1476854
LV
/*
 * TranslatorOps "tb_start" hook: emit per-TB setup code before the
 * first instruction of the block is translated.  For ARM this means
 * zeroing the stored condexec (IT) bits in CPUARMState.
 */
static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        /* store_cpu_field() also frees tmp. */
        store_cpu_field(tmp, condexec_bits);
    }
    tcg_clear_temp_count();
}
12706
f62bd897
LV
/*
 * TranslatorOps "insn_start" hook: record the PC and the current
 * condexec (IT) state for this insn so restore_state_to_opc() can
 * restore them after an exception mid-TB.
 */
static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    /* Remember the op so the syndrome can be patched in later if needed. */
    dc->insn_start = tcg_last_op();
}
12716
a68956ad
LV
/*
 * TranslatorOps "breakpoint_check" hook: emit code for a breakpoint
 * that matched at the current PC.  CPU (architectural) breakpoints get
 * a runtime re-check helper; GDB breakpoints raise EXCP_DEBUG directly.
 * Returns true to tell the translator loop the breakpoint was handled.
 */
static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           to for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing.  */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->pc += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
12743
/*
 * Common per-insn preamble shared by the ARM and Thumb translate_insn
 * hooks.  Handles the user-mode kernel commpage trap and the
 * single-step "active-pending" case.  Returns true if an exception was
 * generated and the caller should not translate the insn.
 */
static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}
13189a90 12777
/*
 * Common per-insn epilogue shared by the ARM and Thumb translate_insn
 * hooks: resolve a pending "condition failed" branch label and publish
 * the updated PC to the translator loop.
 */
static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}
12787
722ef0a5
RH
/*
 * TranslatorOps "translate_insn" hook for A32: fetch one 32-bit ARM
 * insn, advance the PC, and translate it.  Cross-page handling is not
 * needed here because init_disas_context already bounded max_insns.
 */
static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
    dc->insn = insn;
    dc->pc += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA.  We performed the cross-page check
       in init_disas_context by adjusting max_insns.  */
}
12808
dcf14dfb
PM
12809static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12810{
12811 /* Return true if this Thumb insn is always unconditional,
12812 * even inside an IT block. This is true of only a very few
12813 * instructions: BKPT, HLT, and SG.
12814 *
12815 * A larger class of instructions are UNPREDICTABLE if used
12816 * inside an IT block; we do not need to detect those here, because
12817 * what we do by default (perform the cc check and update the IT
12818 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12819 * choice for those situations.
12820 *
12821 * insn is either a 16-bit or a 32-bit instruction; the two are
12822 * distinguishable because for the 16-bit case the top 16 bits
12823 * are zeroes, and that isn't a valid 32-bit encoding.
12824 */
12825 if ((insn & 0xffffff00) == 0xbe00) {
12826 /* BKPT */
12827 return true;
12828 }
12829
12830 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12831 !arm_dc_feature(s, ARM_FEATURE_M)) {
12832 /* HLT: v8A only. This is unconditional even when it is going to
12833 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12834 * For v7 cores this was a plain old undefined encoding and so
12835 * honours its cc check. (We might be using the encoding as
12836 * a semihosting trap, but we don't change the cc check behaviour
12837 * on that account, because a debugger connected to a real v7A
12838 * core and emulating semihosting traps by catching the UNDEF
12839 * exception would also only see cases where the cc check passed.
12840 * No guest code should be trying to do a HLT semihosting trap
12841 * in an IT block anyway.
12842 */
12843 return true;
12844 }
12845
12846 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12847 arm_dc_feature(s, ARM_FEATURE_M)) {
12848 /* SG: v8M only */
12849 return true;
12850 }
12851
12852 return false;
12853}
12854
722ef0a5
RH
/*
 * TranslatorOps "translate_insn" hook for Thumb: fetch one 16- or
 * 32-bit Thumb insn, apply the IT-block condition, translate it,
 * advance the IT state machine, and stop the TB before it can run
 * off the end of the page.
 */
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    /* Fetch the first halfword; a 32-bit insn needs a second fetch. */
    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    /* Inside an IT block, emit the conditional skip unless the insn
     * executes unconditionally even in an IT block (BKPT/HLT/SG).
     */
    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next.  This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
12923
/*
 * TranslatorOps tb_stop hook: emit the code that terminates a
 * translation block, chosen by the reason translation stopped
 * (dc->base.is_jmp) and by whether we are single-stepping.
 *
 * Note the switch statements below deliberately place some cases
 * after "default" (e.g. DISAS_NORETURN): C case ordering does not
 * affect dispatch, only fall-through, and the fall-through chains
 * here (DISAS_UPDATE -> default) are intentional.
 */
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch(dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            /* The immediate argument distinguishes a 16-bit Thumb WFI
             * (advance PC by 2) from a 32-bit one (advance by 4).
             */
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}
13046
4013f7fc
LV
13047static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
13048{
13049 DisasContext *dc = container_of(dcbase, DisasContext, base);
13050
13051 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
1d48474d 13052 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
4013f7fc
LV
13053}
13054
23169224
LV
/* Hook table driving the generic translator loop for A32 (ARM) code;
 * selected in gen_intermediate_code() when the TB's Thumb flag is clear.
 */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
13064
/* Thumb variant of the hook table: identical to arm_translator_ops
 * except for .translate_insn, which decodes Thumb encodings.
 */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
13074
70d3c035 13075/* generate intermediate code for basic block 'tb'. */
23169224 13076void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
70d3c035 13077{
23169224
LV
13078 DisasContext dc;
13079 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 13080
722ef0a5
RH
13081 if (ARM_TBFLAG_THUMB(tb->flags)) {
13082 ops = &thumb_translator_ops;
13083 }
23169224 13084#ifdef TARGET_AARCH64
70d3c035 13085 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
23169224 13086 ops = &aarch64_translator_ops;
2c0262af
FB
13087 }
13088#endif
23169224
LV
13089
13090 translator_loop(ops, &dc.base, cpu, tb);
2c0262af
FB
13091}
13092
/* Textual names of the A-profile CPU modes, indexed by the low four
 * bits of the PSR mode field (see the cpu_mode_names[psr & 0xf] use in
 * arm_cpu_dump_state); "???" marks encodings with no architected mode.
 */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
9ee6e8bb 13097
878096ee
AF
13098void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
13099 int flags)
2c0262af 13100{
878096ee
AF
13101 ARMCPU *cpu = ARM_CPU(cs);
13102 CPUARMState *env = &cpu->env;
2c0262af
FB
13103 int i;
13104
17731115
PM
13105 if (is_a64(env)) {
13106 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
13107 return;
13108 }
13109
2c0262af 13110 for(i=0;i<16;i++) {
7fe48483 13111 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 13112 if ((i % 4) == 3)
7fe48483 13113 cpu_fprintf(f, "\n");
2c0262af 13114 else
7fe48483 13115 cpu_fprintf(f, " ");
2c0262af 13116 }
06e5cf7a 13117
5b906f35
PM
13118 if (arm_feature(env, ARM_FEATURE_M)) {
13119 uint32_t xpsr = xpsr_read(env);
13120 const char *mode;
1e577cc7
PM
13121 const char *ns_status = "";
13122
13123 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
13124 ns_status = env->v7m.secure ? "S " : "NS ";
13125 }
5b906f35
PM
13126
13127 if (xpsr & XPSR_EXCP) {
13128 mode = "handler";
13129 } else {
8bfc26ea 13130 if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
5b906f35
PM
13131 mode = "unpriv-thread";
13132 } else {
13133 mode = "priv-thread";
13134 }
13135 }
13136
1e577cc7 13137 cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
5b906f35
PM
13138 xpsr,
13139 xpsr & XPSR_N ? 'N' : '-',
13140 xpsr & XPSR_Z ? 'Z' : '-',
13141 xpsr & XPSR_C ? 'C' : '-',
13142 xpsr & XPSR_V ? 'V' : '-',
13143 xpsr & XPSR_T ? 'T' : 'A',
1e577cc7 13144 ns_status,
5b906f35 13145 mode);
06e5cf7a 13146 } else {
5b906f35
PM
13147 uint32_t psr = cpsr_read(env);
13148 const char *ns_status = "";
13149
13150 if (arm_feature(env, ARM_FEATURE_EL3) &&
13151 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
13152 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
13153 }
13154
13155 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
13156 psr,
13157 psr & CPSR_N ? 'N' : '-',
13158 psr & CPSR_Z ? 'Z' : '-',
13159 psr & CPSR_C ? 'C' : '-',
13160 psr & CPSR_V ? 'V' : '-',
13161 psr & CPSR_T ? 'T' : 'A',
13162 ns_status,
13163 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
13164 }
b7bcbe95 13165
f2617cfc
PM
13166 if (flags & CPU_DUMP_FPU) {
13167 int numvfpregs = 0;
13168 if (arm_feature(env, ARM_FEATURE_VFP)) {
13169 numvfpregs += 16;
13170 }
13171 if (arm_feature(env, ARM_FEATURE_VFP3)) {
13172 numvfpregs += 16;
13173 }
13174 for (i = 0; i < numvfpregs; i++) {
9a2b5256 13175 uint64_t v = *aa32_vfp_dreg(env, i);
f2617cfc
PM
13176 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
13177 i * 2, (uint32_t)v,
13178 i * 2 + 1, (uint32_t)(v >> 32),
13179 i, v);
13180 }
13181 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 13182 }
2c0262af 13183}
a6b025d3 13184
bad729e2
RH
13185void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
13186 target_ulong *data)
d2856f1a 13187{
3926cc84 13188 if (is_a64(env)) {
bad729e2 13189 env->pc = data[0];
40f860cd 13190 env->condexec_bits = 0;
aaa1f954 13191 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 13192 } else {
bad729e2
RH
13193 env->regs[15] = data[0];
13194 env->condexec_bits = data[1];
aaa1f954 13195 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 13196 }
d2856f1a 13197}