/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "arm_ldst.h"
#include "hw/semihosting/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
/* Function prototypes for gen_ functions for fixed point conversions */
typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}

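/*
 * Illustrative use: the gen_aa32_ld*_iss/st*_iss wrappers defined later
 * pass the destination register number plus flags, so e.g. a halfword
 * load into r3 from a 16-bit Thumb encoding ends up calling
 * disas_set_da_iss(s, MO_UW, 3 | ISSIs16Bit). The recorded syndrome
 * lets a hypervisor emulate the access on a data abort without having
 * to fetch and decode the guest instruction.
 */
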
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

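/*
 * Example of the masking above: in Thumb mode a write of 0x8001 to r15
 * via store_reg() stores 0x8000 (interworking writes that consume bit 0
 * go through gen_bx() instead); in ARM mode a write of 0x8003 likewise
 * stores 0x8000.
 */
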
/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

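/*
 * gen_smul_dual() computes the two signed 16x16 products used by
 * SMUAD/SMUSD-style insns: on return, a holds lo(a)*lo(b) and b holds
 * hi(a)*hi(b). For example a = 0x00020003 and b = 0x00040005 yield
 * a = 15 (3*5) and b = 8 (2*4).
 */
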
/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}
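
/*
 * Worked example for gen_rev16(): var = 0xAABBCCDD.
 *   tmp = (var >> 8) & 0x00ff00ff = 0x00AA00CC
 *   var = (var & 0x00ff00ff) << 8 = 0xBB00DD00
 *   result = 0xBBAADDCC, i.e. each 16-bit half byte-swapped in place.
 */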

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

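/*
 * Note on the borrow convention: AArch32 SBC computes T0 - T1 - NOT(C),
 * and subtraction sets C to NOT(borrow), so "T0 - T1 + CF - 1" above is
 * exactly the architectural subtract-with-carry. E.g. with CF = 1 (no
 * borrow pending), 5 SBC 3 gives 5 - 3 + 1 - 1 = 2.
 */
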
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

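/*
 * The V-flag computation above uses the standard identity for signed
 * overflow of an addition: V = (result ^ t0) & ~(t0 ^ t1), i.e. overflow
 * occurred iff the operands had the same sign and the result's sign
 * differs. For example 0x7fffffff + 1 = 0x80000000 sets V, while
 * 0x7fffffff + (-1) does not.
 */
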
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

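/*
 * For subtraction the carry flag is NOT(borrow), so CF = (t0 >= t1)
 * unsigned, which is exactly what the TCG_COND_GEU setcond computes:
 * 5 - 7 leaves CF = 0 (borrow), 7 - 5 leaves CF = 1.
 */
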
/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

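/*
 * GEN_SHIFT implements the AArch32 register-shift semantics: only the
 * low 8 bits of the shift count are used, and counts of 32..255 must
 * yield zero rather than the host's (and TCG's) undefined behaviour for
 * out-of-range shifts. The movcond above selects 0 as the shifted
 * operand when count > 31, so e.g. LSL by 40 produces 0 while LSL by 7
 * shifts normally.
 */
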
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and AArch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}

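/*
 * Typical use of the DisasCompare machinery (illustrative sketch): a
 * conditionally executed insn emits its "condition failed" path as
 *
 *     TCGLabel *skip = gen_new_label();
 *     arm_gen_test_cc(cc ^ 1, skip);   // branch past the body if cc fails
 *     ... generate the conditional body ...
 *     gen_set_label(skip);
 *
 * arm_test_cc() picks a TCG condition plus a value to compare against
 * zero, so the common flag tests fold to a single brcond.
 */
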
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}

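/*
 * BE32 address munging example: with SCTLR.B set, a 1-byte access XORs
 * the address with 3 and a 2-byte access XORs it with 2, so the byte at
 * "address 5" of a BE32 guest is fetched from little-endian offset 6
 * within its aligned word. 32-bit and larger accesses are left alone.
 */
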
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}

static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

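/*
 * BE32 doubleword fixup: under SCTLR.B a 64-bit access is specified as
 * two big-endian words rather than one big-endian doubleword, so a
 * plain MO_BE 8-byte load returns the two words swapped; rotating by 32
 * puts them back in architectural order. gen_aa32_st_i64() below
 * applies the same rotation before storing.
 */
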
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)

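/*
 * For reference, DO_GEN_LD(8s, MO_SB) above expands to (roughly):
 *
 *   static inline void gen_aa32_ld8s(DisasContext *s, TCGv_i32 val,
 *                                    TCGv_i32 a32, int index)
 *   {
 *       gen_aa32_ld_i32(s, val, a32, index, MO_SB | s->be_data);
 *   }
 *
 * plus a gen_aa32_ld8s_iss() variant that additionally records the ISS
 * syndrome information for the access.
 */
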
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}
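
/*
 * Layout illustrated: the VFP/Neon registers are stored inside the
 * SVE-style zregs[] array, two D registers per 128-bit Z register.
 * So vfp_reg_offset(true, 5) is D5, i.e. zregs[2].d[1], while
 * vfp_reg_offset(false, 5) is S5, one 32-bit half of D2 (the
 * CPU_DoubleU l.upper/l.lower offsets account for host endianness).
 */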

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}

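/*
 * Example of the big-endian host adjustment above: for a 16-bit element
 * (element_size = 2) at index 1 the little-endian byte offset is 2; on
 * a big-endian host it becomes 2 ^ (8 - 2) = 4, the host byte position
 * holding the same architectural element within the 8-byte storage
 * unit.
 */
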
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, cpu_env, offset);
        break;
    case MO_Q:
        tcg_gen_ld_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, cpu_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static inline void neon_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}

#define ARM_CP_RW_BIT   (1 << 20)

/* Include the VFP decoder */
#include "translate-vfp.inc.c"

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

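/*
 * The instantiations below generate one tiny wrapper per iwMMXt helper;
 * e.g. IWMMXT_OP_ENV_SIZE(unpackl) produces
 * gen_op_iwmmxt_unpacklb_M0_wRn(), gen_op_iwmmxt_unpacklw_M0_wRn() and
 * gen_op_iwmmxt_unpackll_M0_wRn(), each of which loads wRn into cpu_V1
 * and calls the matching gen_helper_iwmmxt_* with cpu_M0 as both source
 * and destination.
 */
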
1622IWMMXT_OP(maddsq)
1623IWMMXT_OP(madduq)
1624IWMMXT_OP(sadb)
1625IWMMXT_OP(sadw)
1626IWMMXT_OP(mulslw)
1627IWMMXT_OP(mulshw)
1628IWMMXT_OP(mululw)
1629IWMMXT_OP(muluhw)
1630IWMMXT_OP(macsw)
1631IWMMXT_OP(macuw)
1632
477955bd
PM
1633IWMMXT_OP_ENV_SIZE(unpackl)
1634IWMMXT_OP_ENV_SIZE(unpackh)
1635
1636IWMMXT_OP_ENV1(unpacklub)
1637IWMMXT_OP_ENV1(unpackluw)
1638IWMMXT_OP_ENV1(unpacklul)
1639IWMMXT_OP_ENV1(unpackhub)
1640IWMMXT_OP_ENV1(unpackhuw)
1641IWMMXT_OP_ENV1(unpackhul)
1642IWMMXT_OP_ENV1(unpacklsb)
1643IWMMXT_OP_ENV1(unpacklsw)
1644IWMMXT_OP_ENV1(unpacklsl)
1645IWMMXT_OP_ENV1(unpackhsb)
1646IWMMXT_OP_ENV1(unpackhsw)
1647IWMMXT_OP_ENV1(unpackhsl)
1648
1649IWMMXT_OP_ENV_SIZE(cmpeq)
1650IWMMXT_OP_ENV_SIZE(cmpgtu)
1651IWMMXT_OP_ENV_SIZE(cmpgts)
1652
1653IWMMXT_OP_ENV_SIZE(mins)
1654IWMMXT_OP_ENV_SIZE(minu)
1655IWMMXT_OP_ENV_SIZE(maxs)
1656IWMMXT_OP_ENV_SIZE(maxu)
1657
1658IWMMXT_OP_ENV_SIZE(subn)
1659IWMMXT_OP_ENV_SIZE(addn)
1660IWMMXT_OP_ENV_SIZE(subu)
1661IWMMXT_OP_ENV_SIZE(addu)
1662IWMMXT_OP_ENV_SIZE(subs)
1663IWMMXT_OP_ENV_SIZE(adds)
1664
1665IWMMXT_OP_ENV(avgb0)
1666IWMMXT_OP_ENV(avgb1)
1667IWMMXT_OP_ENV(avgw0)
1668IWMMXT_OP_ENV(avgw1)
e677137d 1669
477955bd
PM
1670IWMMXT_OP_ENV(packuw)
1671IWMMXT_OP_ENV(packul)
1672IWMMXT_OP_ENV(packuq)
1673IWMMXT_OP_ENV(packsw)
1674IWMMXT_OP_ENV(packsl)
1675IWMMXT_OP_ENV(packsq)
e677137d 1676
e677137d
PB
1677static void gen_op_iwmmxt_set_mup(void)
1678{
39d5492a 1679 TCGv_i32 tmp;
e677137d
PB
1680 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1681 tcg_gen_ori_i32(tmp, tmp, 2);
1682 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1683}
1684
1685static void gen_op_iwmmxt_set_cup(void)
1686{
39d5492a 1687 TCGv_i32 tmp;
e677137d
PB
1688 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1689 tcg_gen_ori_i32(tmp, tmp, 1);
1690 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1691}
1692
1693static void gen_op_iwmmxt_setpsr_nz(void)
1694{
39d5492a 1695 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1696 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1697 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1698}
1699
1700static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1701{
1702 iwmmxt_load_reg(cpu_V1, rn);
86831435 1703 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1704 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1705}
1706
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

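/*
 * Worked example (illustrative, derived from the decode above): the
 * byte offset is imm8 shifted left by 2 (scaled to words) when bit 8 of
 * the insn is set.  A pre-indexed access (bit 24) adds or subtracts it
 * (bit 23 = U) before the access and writes back only if bit 21 (W) is
 * set; a post-indexed access (bit 24 clear, bit 21 set) uses Rn
 * unmodified and always writes the adjusted value back.
 */
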
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {  /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {  /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {  /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {  /* WLDRD */
                        gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else {  /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {  /* WLDRH */
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                    } else {  /* WLDRB */
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {  /* WSTRD */
                        gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
                    } else {  /* WSTRW wRd */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) {  /* WSTRH */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                    } else {  /* WSTRB */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

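    /*
     * Note on the switch key below (a reading of the expression, not
     * original commentary): ((insn >> 12) & 0xf00) places insn[23:20]
     * in bits [11:8] and ((insn >> 4) & 0xff) places insn[11:4] in bits
     * [7:0], so each case value is the 12-bit concatenation
     * insn[23:20]:insn[11:4].
     */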
    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:  /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:  /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:  /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:  /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:  /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_not_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:  /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:  /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:  /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:  /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:  /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:  /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:  /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:  /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:  /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:  /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:  /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            tmp2 = NULL;
            tmp3 = NULL;
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:  /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:  /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:  /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:  /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:  /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:  /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:  /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:  /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:  /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:  /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:  /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:  /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:  /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:  /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:  /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:  /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:  /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:  /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:  /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:  /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:  /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:  /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:  /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:  /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}

/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:  /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:  /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:  /* MIABB */
        case 0xd:  /* MIABT */
        case 0xe:  /* MIATB */
        case 0xf:  /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {  /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {  /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}

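/*
 * Note (an inference from the MRA masking above, not original
 * commentary): the XScale internal accumulator is 40 bits wide, so MRA
 * keeps only bits [39:32] in rdhi via the (1 << (40 - 32)) - 1 mask.
 */
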
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)

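/*
 * Illustrative example (derived from the macros above): with VFP3,
 * VFP_DREG_D(reg, insn) builds the D register index from insn[15:12]
 * as the low four bits and insn[22] as bit 4, so insn[22] = 1 with
 * insn[15:12] = 0x3 gives reg = 0x13 (d19).  Without VFP3, a set
 * insn[22] makes the encoding UNDEF (the macro returns 1).
 */
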
static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

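/*
 * Example (illustrative, derived from the code above):
 * gen_neon_dup_low16 turns 0x1234ABCD into 0xABCDABCD, and
 * gen_neon_dup_high16 turns 0xABCD1234 into 0xABCDABCD; each duplicates
 * one 16-bit half across both halves of the word.
 */
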
/*
 * Disassemble a VFP instruction.  Returns nonzero if an error occurred
 * (i.e. an undefined instruction).
 */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /*
     * If the decodetree decoder handles this insn it will always
     * emit code to either execute the insn or generate an appropriate
     * exception; so we don't need to ever return non-zero to tell
     * the calling code to emit an UNDEF exception.
     */
    if (extract32(insn, 28, 4) == 0xf) {
        if (disas_vfp_uncond(s, insn)) {
            return 0;
        }
    } else {
        if (disas_vfp(s, insn)) {
            return 0;
        }
    }
    /* If the decodetree decoder didn't handle this insn, it must be UNDEF */
    return 1;
}

static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}

/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}

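/*
 * Note (a summary of the logic above, not original commentary): direct
 * chaining with tcg_gen_goto_tb is only used when the destination lies
 * on the same guest page as this TB (checked against both the TB start
 * and the current PC), so a remapped page cannot leave a stale direct
 * jump in place; otherwise we fall back to the indirect
 * gen_goto_ptr() path.
 */
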
static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
    }
}

static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}

/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality */
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}

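/*
 * Worked example (illustrative): MSR with flags = 0b1001 (the "_cf"
 * fields) starts from mask = 0xff0000ff; the reserved bits (and, for
 * the CPSR, the execution-state bits) are then cleared, and in user
 * mode the result is further restricted to CPSR_USER.
 */
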
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}

static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe:   /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10:  /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12:  /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14:  /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16:  /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c:  /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e:  /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default:    /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /*
         * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
         * (and so we can forbid accesses from EL2 or below). elr_hyp
         * can be accessed also from Hyp mode, so forbid accesses from
         * EL0 or EL1.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
            (s->current_el < 3 && *regno != 17)) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}

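/*
 * Example (illustrative, following the decode above): MRS r0, SPSR_irq
 * has r = 1, sysm = 0x10, giving *tgtmode = ARM_CPU_MODE_IRQ and
 * *regno = 16 (the arbitrary internal SPSR number); MRS r0, R13_svc has
 * r = 0, sysm = 0x13, giving ARM_CPU_MODE_SVC and regno 13.
 */
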
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}

static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}

/* Store value to PC as for an exception return (i.e. don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}

/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}

#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  tcg_gen_smax_i32
#define gen_helper_neon_pmax_u32  tcg_gen_umax_i32
#define gen_helper_neon_pmin_s32  tcg_gen_smin_i32
#define gen_helper_neon_pmin_u32  tcg_gen_umin_i32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

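/*
 * Illustrative sketch (an expansion example, not in the original
 * source): with size = 1 and u = 0, GEN_NEON_INTEGER_OP(shl) selects
 * case 2 and emits
 *
 *     gen_helper_neon_shl_s16(tmp, tmp, tmp2);
 *
 * i.e. (size << 1) | u picks the signed/unsigned helper for each
 * element width, and any unhandled combination makes the insn UNDEF.
 */
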
static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}

static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qunzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qunzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_unzip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}

static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_zip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}

static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

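/*
 * Worked example for gen_neon_trn_u8: writing the input bytes MSB first
 * as t0 = [a3 a2 a1 a0] and t1 = [b3 b2 b1 b0], the mask and shift
 * sequence above yields t0 = [a2 b2 a0 b0] and t1 = [a3 b3 a1 b1],
 * i.e. the 2x2 byte transposition performed by VTRN.8 on each pair of
 * lanes.
 */
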
static struct {
    int nregs;
    int interleave;
    int spacing;
} const neon_ls_element_type[11] = {
    {1, 4, 1},
    {1, 4, 2},
    {4, 1, 1},
    {2, 2, 2},
    {1, 3, 1},
    {1, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {1, 2, 1},
    {1, 2, 2},
    {2, 1, 1}
};

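/*
 * The "type" field (insn bits [11:8]) of a load/store-multiple insn
 * indexes this table; see the ARM ARM DDI0406B table A7-7.  For example
 * index 7 ({1, 1, 1}) is VLD1/VST1 with one register, index 2
 * ({4, 1, 1}) is VLD1/VST1 with four consecutive registers, and index 0
 * ({1, 4, 1}) is VLD4/VST4, interleaving elements across four registers.
 * The writeback stride computed below is nregs * interleave * 8 bytes.
 */
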
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int load;
    int n;
    int vec_size;
    int mmu_idx;
    TCGMemOp endian;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    endian = s->be_data;
    mmu_idx = get_mem_index(s);
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1) {
            return 1;
        }
        /* For our purposes, bytes are always little-endian.  */
        if (size == 0) {
            endian = MO_LE;
        }
        /* Consecutive little-endian elements from a single register
         * can be promoted to a larger little-endian operation.
         */
        if (interleave == 1 && endian == MO_LE) {
            size = 3;
        }
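        /*
         * Example of this promotion: VLD1.8 of eight consecutive bytes
         * (interleave == 1) is performed as a single 64-bit little-endian
         * load instead of eight byte loads, since byte order within the
         * doubleword is the same either way.
         */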
        tmp64 = tcg_temp_new_i64();
        addr = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(1 << size);
        load_reg_var(s, addr, rn);
        for (reg = 0; reg < nregs; reg++) {
            for (n = 0; n < 8 >> size; n++) {
                int xs;
                for (xs = 0; xs < interleave; xs++) {
                    int tt = rd + reg + spacing * xs;

                    if (load) {
                        gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
                        neon_store_element64(tt, n, size, tmp64);
                    } else {
                        neon_load_element64(tmp64, tt, n, size);
                        gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
                    }
                    tcg_gen_add_i32(addr, addr, tmp2);
                }
            }
        }
        tcg_temp_free_i32(addr);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i64(tmp64);
        stride = nregs * interleave * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);

            /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
             * VLD2/3/4 to all lanes: bit 5 indicates register stride.
             */
            stride = (insn & (1 << 5)) ? 2 : 1;
            vec_size = nregs == 1 ? stride * 8 : 8;

            tmp = tcg_temp_new_i32();
            for (reg = 0; reg < nregs; reg++) {
                gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                                s->be_data | size);
                if ((rd & 1) && vec_size == 16) {
                    /* We cannot write 16 bytes at once because the
                     * destination is unaligned.
                     */
                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
                                         8, 8, tmp);
                    tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
                                     neon_reg_offset(rd, 0), 8, 8);
                } else {
                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
                                         vec_size, vec_size, tmp);
                }
                tcg_gen_addi_i32(addr, addr, 1 << size);
                rd += stride;
            }
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            int reg_idx;
            switch (size) {
            case 0:
                reg_idx = (insn >> 5) & 7;
                stride = 1;
                break;
            case 1:
                reg_idx = (insn >> 6) & 3;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                reg_idx = (insn >> 7) & 1;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            tmp = tcg_temp_new_i32();
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                    neon_store_element(rd, reg_idx, size, tmp);
                } else { /* Store */
                    neon_load_element(tmp, rd, reg_idx, size);
                    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            tcg_temp_free_i32(tmp);
            stride = nregs * (1 << size);
        }
    }
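    /*
     * Writeback: rm == 15 encodes "no writeback"; rm == 13 encodes
     * post-increment by the transfer size accumulated in "stride";
     * any other rm encodes post-increment by that index register.
     */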
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}

static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}

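/*
 * In gen_neon_narrow_op above, the op/u parameters map onto the
 * narrowing instructions: op == 0, u == 0 is VQMOVN.S (signed
 * saturating); op == 0, u == 1 is VQMOVN.U (unsigned saturating);
 * op == 1, u == 0 is VMOVN (plain truncation); op == 1, u == 1 is
 * VQMOVUN (signed input, unsigned saturated result).
 */
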
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};

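/*
 * Reading these masks: bit n set means element size n is permitted,
 * e.g. 0x7 allows 8/16/32-bit elements and UNDEFs for size 3, 0xf
 * additionally allows 64-bit elements, and 0x5 allows only sizes 0
 * and 2 because size bit 1 is repurposed as an extra opcode bit.
 */
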
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};

/* Expand v8.1 simd helper.  */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}

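/*
 * In do_v81_helper above, opr_sz is the operation width in bytes:
 * 8 for a D-register (q == 0), 16 for a Q-register (q == 1).  Callers
 * pass e.g. gen_helper_gvec_qrdmlah_s16 so that VQRDMLAH expands to a
 * single gvec call rather than a per-element loop.
 */
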
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_ssra[] = {
    INDEX_op_sari_vec, INDEX_op_add_vec, 0
};

const GVecGen2i ssra_op[4] = {
    { .fni8 = gen_ssra8_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_8 },
    { .fni8 = gen_ssra16_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_16 },
    { .fni4 = gen_ssra32_i32,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_32 },
    { .fni8 = gen_ssra64_i64,
      .fniv = gen_ssra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_ssra,
      .load_dest = true,
      .vece = MO_64 },
};

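/*
 * SSRA per-element semantics, as implemented above: d += (signed)a >> shift.
 * E.g. with .vece = MO_8 and shift == 1, each destination byte accumulates
 * the arithmetic right shift of the corresponding source byte; load_dest
 * is set because the destination doubles as an accumulator.
 */
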
static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_usra[] = {
    INDEX_op_shri_vec, INDEX_op_add_vec, 0
};

const GVecGen2i usra_op[4] = {
    { .fni8 = gen_usra8_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_8, },
    { .fni8 = gen_usra16_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_16, },
    { .fni4 = gen_usra32_i32,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_32, },
    { .fni8 = gen_usra64_i64,
      .fniv = gen_usra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_64, },
};

static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
        tcg_gen_shri_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };

const GVecGen2i sri_op[4] = {
    { .fni8 = gen_shr8_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_8 },
    { .fni8 = gen_shr16_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_16 },
    { .fni4 = gen_shr32_ins_i32,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_32 },
    { .fni8 = gen_shr64_ins_i64,
      .fniv = gen_shr_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_64 },
};

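/*
 * SRI (shift right and insert) example from the code above: with
 * .vece = MO_8 and shift == 4, mask is 0x0f replicated per byte, so
 * each result byte keeps the top four bits of the destination and
 * takes its low four bits from the shifted source:
 * d = (d & ~mask) | ((a >> 4) & mask).
 */
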
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
        tcg_gen_shli_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };

const GVecGen2i sli_op[4] = {
    { .fni8 = gen_shl8_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_8 },
    { .fni8 = gen_shl16_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_16 },
    { .fni4 = gen_shl32_ins_i32,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_32 },
    { .fni8 = gen_shl64_ins_i64,
      .fniv = gen_shl_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_64 },
};

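/*
 * SLI is the mirror image of SRI above:
 * d = (d & MAKE_64BIT_MASK(0, shift)) | (a << shift), i.e. the low
 * "shift" bits of each destination element are preserved and the rest
 * is filled from the shifted source.
 */
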
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}

/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */

static const TCGOpcode vecop_list_mla[] = {
    INDEX_op_mul_vec, INDEX_op_add_vec, 0
};

static const TCGOpcode vecop_list_mls[] = {
    INDEX_op_mul_vec, INDEX_op_sub_vec, 0
};

const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_64 },
};

const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_64 },
};

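/*
 * Note the fni* callbacks above take (d, a, b) and deliberately clobber
 * "a" as a scratch for the product; load_dest is set in every entry so
 * the gvec expander treats "d" as a true accumulator for VMLA/VMLS.
 */
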
/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };

const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_64 },
};

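/*
 * Example: CMTST.8 with a = 0x01, b = 0x03 yields 0xff (the AND is
 * nonzero), while a = 0x01, b = 0x02 yields 0x00; setcond produces 0
 * or 1 and the negation widens that into the all-ones lane mask.
 */
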
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

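/*
 * The extra "sat" operand accumulates saturation state: x holds the
 * wrapping (modular) sum and t the saturated sum, so the comparison
 * marks exactly the lanes that saturated, and those all-ones lanes are
 * ORed into vfp.qc (wired up via write_aofs in the GVecGen4
 * descriptors below).
 */
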
static const TCGOpcode vecop_list_uqadd[] = {
    INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_64 },
};

static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqadd[] = {
    INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqsub[] = {
    INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqsub[] = {
    INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_64 },
};

/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int vec_size;
    uint32_t imm;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_ptr ptr1, ptr2, ptr3;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    vec_size = q ? 16 : 8;
    rd_ofs = neon_reg_offset(rd, 0);
    rn_ofs = neon_reg_offset(rn, 0);
    rm_ofs = neon_reg_offset(rm, 0);

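    /*
     * Decode summary: q (bit 6) selects D- vs Q-register operation,
     * u (bit 24) is the "unsigned"/op-modifier bit, and vec_size is
     * the vector length in bytes (8 or 16) passed to the
     * tcg_gen_gvec_* expanders below.
     */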
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        switch (op) {
        case NEON_3R_SHA:
            /* The SHA-1/SHA-256 3-register instructions require special
             * treatment here, as their size field is overloaded as an
             * op type selector, and they all consume their input in a
             * single pass.
             */
            if (!q) {
                return 1;
            }
            if (!u) { /* SHA-1 */
                if (!dc_isar_feature(aa32_sha1, s)) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                tmp4 = tcg_const_i32(size);
                gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
                tcg_temp_free_i32(tmp4);
            } else { /* SHA-256 */
                if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                switch (size) {
                case 0:
                    gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
                    break;
                case 1:
                    gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
                    break;
                case 2:
                    gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
                    break;
                }
            }
            tcg_temp_free_ptr(ptr1);
            tcg_temp_free_ptr(ptr2);
            tcg_temp_free_ptr(ptr3);
            return 0;

        case NEON_3R_VPADD_VQRDMLAH:
            if (!u) {
                break;  /* VPADD */
            }
            /* VQRDMLAH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_VFM_VQRDMLSH:
            if (!u) {
                /* VFM, VFMS */
                if (size == 1) {
                    return 1;
                }
                break;
            }
            /* VQRDMLSH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_LOGIC: /* Logic ops.  */
            switch ((u << 2) | size) {
            case 0: /* VAND */
                tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 1: /* VBIC */
                tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
                break;
            case 2: /* VORR */
                tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
                                vec_size, vec_size);
                break;
            case 3: /* VORN */
                tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 4: /* VEOR */
                tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 5: /* VBSL */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
                                    vec_size, vec_size);
                break;
            case 6: /* VBIT */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
                                    vec_size, vec_size);
                break;
            case 7: /* VBIF */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
                                    vec_size, vec_size);
                break;
            }
            return 0;

        case NEON_3R_VADD_VSUB:
            if (u) {
                tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else {
                tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            }
            return 0;

        case NEON_3R_VQADD:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqadd_op : sqadd_op) + size);
            return 0;

        case NEON_3R_VQSUB:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqsub_op : sqsub_op) + size);
            return 0;

        case NEON_3R_VMUL: /* VMUL */
            if (u) {
                /* Polynomial case allows only P8 and is handled below.  */
                if (size != 0) {
                    return 1;
                }
            } else {
                tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                return 0;
            }
            break;

        case NEON_3R_VML: /* VMLA, VMLS */
            tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
                           u ? &mls_op[size] : &mla_op[size]);
            return 0;

        case NEON_3R_VTST_VCEQ:
            if (u) { /* VCEQ */
                tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else { /* VTST */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &cmtst_op[size]);
            }
            return 0;

        case NEON_3R_VCGT:
            tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VCGE:
            tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VMAX:
            if (u) {
                tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        case NEON_3R_VMIN:
            if (u) {
                tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        }

        if (size == 3) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case NEON_3R_VSHL:
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
            }
            break;
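        /*
         * E.g. VSHL Dd, Dm, Dn shifts the elements of Dm by per-element
         * counts taken from Dn, the opposite operand order from the rest
         * of the 3-reg-same group, so rn and rm are swapped above to let
         * the common per-pass code treat all four shifts uniformly.
         */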
36a71934 5129 case NEON_3R_VPADD_VQRDMLAH:
62698be3
PM
5130 case NEON_3R_VPMAX:
5131 case NEON_3R_VPMIN:
9ee6e8bb 5132 pairwise = 1;
2c0262af 5133 break;
25f84f79
PM
5134 case NEON_3R_FLOAT_ARITH:
5135 pairwise = (u && size < 2); /* if VPADD (float) */
5136 break;
5137 case NEON_3R_FLOAT_MINMAX:
5138 pairwise = u; /* if VPMIN/VPMAX (float) */
5139 break;
5140 case NEON_3R_FLOAT_CMP:
5141 if (!u && size) {
5142 /* no encoding for U=0 C=1x */
5143 return 1;
5144 }
5145 break;
5146 case NEON_3R_FLOAT_ACMP:
5147 if (!u) {
5148 return 1;
5149 }
5150 break;
505935fc
WN
5151 case NEON_3R_FLOAT_MISC:
5152 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5153 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5154 return 1;
5155 }
2c0262af 5156 break;
36a71934
RH
5157 case NEON_3R_VFM_VQRDMLSH:
5158 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
5159 return 1;
5160 }
5161 break;
9ee6e8bb 5162 default:
2c0262af 5163 break;
9ee6e8bb 5164 }
dd8fbd78 5165
25f84f79
PM
5166 if (pairwise && q) {
5167 /* All the pairwise insns UNDEF if Q is set */
5168 return 1;
5169 }
5170
9ee6e8bb
PB
5171 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5172
5173 if (pairwise) {
5174 /* Pairwise. */
a5a14945
JR
5175 if (pass < 1) {
5176 tmp = neon_load_reg(rn, 0);
5177 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5178 } else {
a5a14945
JR
5179 tmp = neon_load_reg(rm, 0);
5180 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5181 }
5182 } else {
5183 /* Elementwise. */
dd8fbd78
FN
5184 tmp = neon_load_reg(rn, pass);
5185 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5186 }
5187 switch (op) {
62698be3 5188 case NEON_3R_VHADD:
9ee6e8bb
PB
5189 GEN_NEON_INTEGER_OP(hadd);
5190 break;
62698be3 5191 case NEON_3R_VRHADD:
9ee6e8bb 5192 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5193 break;
62698be3 5194 case NEON_3R_VHSUB:
9ee6e8bb
PB
5195 GEN_NEON_INTEGER_OP(hsub);
5196 break;
62698be3 5197 case NEON_3R_VSHL:
ad69471c 5198 GEN_NEON_INTEGER_OP(shl);
2c0262af 5199 break;
62698be3 5200 case NEON_3R_VQSHL:
02da0b2d 5201 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5202 break;
62698be3 5203 case NEON_3R_VRSHL:
ad69471c 5204 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5205 break;
62698be3 5206 case NEON_3R_VQRSHL:
02da0b2d 5207 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5208 break;
62698be3 5209 case NEON_3R_VABD:
9ee6e8bb
PB
5210 GEN_NEON_INTEGER_OP(abd);
5211 break;
62698be3 5212 case NEON_3R_VABA:
9ee6e8bb 5213 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5214 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5215 tmp2 = neon_load_reg(rd, pass);
5216 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5217 break;
62698be3 5218 case NEON_3R_VMUL:
82083184
RH
5219 /* VMUL.P8; other cases already eliminated. */
5220 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb 5221 break;
62698be3 5222 case NEON_3R_VPMAX:
9ee6e8bb
PB
5223 GEN_NEON_INTEGER_OP(pmax);
5224 break;
62698be3 5225 case NEON_3R_VPMIN:
9ee6e8bb
PB
5226 GEN_NEON_INTEGER_OP(pmin);
5227 break;
62698be3 5228 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5229 if (!u) { /* VQDMULH */
5230 switch (size) {
02da0b2d
PM
5231 case 1:
5232 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5233 break;
5234 case 2:
5235 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5236 break;
62698be3 5237 default: abort();
9ee6e8bb 5238 }
62698be3 5239 } else { /* VQRDMULH */
9ee6e8bb 5240 switch (size) {
02da0b2d
PM
5241 case 1:
5242 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5243 break;
5244 case 2:
5245 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5246 break;
62698be3 5247 default: abort();
9ee6e8bb
PB
5248 }
5249 }
5250 break;
36a71934 5251 case NEON_3R_VPADD_VQRDMLAH:
9ee6e8bb 5252 switch (size) {
dd8fbd78
FN
5253 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5254 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5255 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5256 default: abort();
9ee6e8bb
PB
5257 }
5258 break;
62698be3 5259 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5260 {
5261 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5262 switch ((u << 2) | size) {
5263 case 0: /* VADD */
aa47cfdd
PM
5264 case 4: /* VPADD */
5265 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5266 break;
5267 case 2: /* VSUB */
aa47cfdd 5268 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5269 break;
5270 case 6: /* VABD */
aa47cfdd 5271 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5272 break;
5273 default:
62698be3 5274 abort();
9ee6e8bb 5275 }
aa47cfdd 5276 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5277 break;
aa47cfdd 5278 }
62698be3 5279 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5280 {
5281 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5282 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5283 if (!u) {
7d1b0095 5284 tcg_temp_free_i32(tmp2);
dd8fbd78 5285 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5286 if (size == 0) {
aa47cfdd 5287 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5288 } else {
aa47cfdd 5289 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5290 }
5291 }
aa47cfdd 5292 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5293 break;
aa47cfdd 5294 }
            case NEON_3R_FLOAT_CMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_ACMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MINMAX:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MISC:
                if (u) {
                    /* VMAXNM/VMINNM */
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    if (size == 0) {
                        gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
                    }
                    tcg_temp_free_ptr(fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                    } else {
                        gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                    }
                }
                break;
            case NEON_3R_VFM_VQRDMLSH:
            {
                /* VFMA, VFMS: fused multiply-add */
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                TCGv_i32 tmp3 = neon_load_reg(rd, pass);
                if (size) {
                    /* VFMS */
                    gen_helper_vfp_negs(tmp, tmp);
                }
                gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
                tcg_temp_free_i32(tmp3);
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            default:
                abort();
            }
            tcg_temp_free_i32(tmp2);

            /* Save the result.  For elementwise operations we can put it
               straight into the destination register.  For pairwise operations
               we have to be careful to avoid clobbering the source operands. */
            if (pairwise && rd == rm) {
                neon_store_scratch(pass, tmp);
            } else {
                neon_store_reg(rd, pass, tmp);
            }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift.  */
                if (op > 7) {
                    return 1;
                }
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
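            /*
             * Decode note (added commentary): for the non-64-bit forms the
             * element size is given by the position of the most significant
             * set bit of imm6 (insn[21:16]): 1xxxxx -> 32-bit elements,
             * 01xxxx -> 16-bit, 001xxx -> 8-bit; the bits below the marker
             * are the raw shift field extracted above.
             */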
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                if (q && ((rd | rm) & 1)) {
                    return 1;
                }
                if (!u && (op == 4 || op == 6)) {
                    return 1;
                }
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4) {
                    shift = shift - (1 << (size + 3));
                }

                switch (op) {
                case 0: /* VSHR */
                    /* Right shift comes here negative.  */
                    shift = -shift;
                    /* Shifts larger than the element size are
                     * architecturally valid.  An unsigned shift results
                     * in all zeros; a signed shift in all sign bits.
                     */
                    if (!u) {
                        tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
                                          MIN(shift, (8 << size) - 1),
                                          vec_size, vec_size);
                    } else if (shift >= 8 << size) {
                        tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
                    } else {
                        tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
                                          vec_size, vec_size);
                    }
                    return 0;

                case 1: /* VSRA */
                    /* Right shift comes here negative.  */
                    shift = -shift;
                    /* Shifts larger than the element size are
                     * architecturally valid.  An unsigned shift results
                     * in all zeros; a signed shift in all sign bits.
                     */
                    if (!u) {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        MIN(shift, (8 << size) - 1),
                                        &ssra_op[size]);
                    } else if (shift >= 8 << size) {
                        /* rd += 0 */
                    } else {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        shift, &usra_op[size]);
                    }
                    return 0;

                case 4: /* VSRI */
                    if (!u) {
                        return 1;
                    }
                    /* Right shift comes here negative.  */
                    shift = -shift;
                    /* Shift out of range leaves destination unchanged.  */
                    if (shift < 8 << size) {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        shift, &sri_op[size]);
                    }
                    return 0;

                case 5: /* VSHL, VSLI */
                    if (u) { /* VSLI */
                        /* Shift out of range leaves destination unchanged.  */
                        if (shift < 8 << size) {
                            tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
                                            vec_size, shift, &sli_op[size]);
                        }
                    } else { /* VSHL */
                        /* Shifts larger than the element size are
                         * architecturally valid and result in zero.
                         */
                        if (shift >= 8 << size) {
                            tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
                        } else {
                            tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
                                              vec_size, vec_size);
                        }
                    }
                    return 0;
                }

                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }

                /* To avoid excessive duplication of ops we implement shift
                 * by immediate using the variable shift operations.
                 */
                imm = dup_const(size, shift);
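                /* Commentary: dup_const() replicates the (possibly
                 * negative) shift count into every lane of the chosen
                 * width, e.g. size == 1, shift == -3 gives
                 * 0xfffdfffdfffdfffd, so a single variable-shift helper
                 * call shifts all lanes at once.
                 */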
5500
5501 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5502 if (size == 3) {
5503 neon_load_reg64(cpu_V0, rm + pass);
5504 tcg_gen_movi_i64(cpu_V1, imm);
5505 switch (op) {
ad69471c
PB
5506 case 2: /* VRSHR */
5507 case 3: /* VRSRA */
5508 if (u)
5509 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5510 else
ad69471c 5511 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5512 break;
0322b26e 5513 case 6: /* VQSHLU */
02da0b2d
PM
5514 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5515 cpu_V0, cpu_V1);
ad69471c 5516 break;
0322b26e
PM
5517 case 7: /* VQSHL */
5518 if (u) {
02da0b2d 5519 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5520 cpu_V0, cpu_V1);
5521 } else {
02da0b2d 5522 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5523 cpu_V0, cpu_V1);
5524 }
9ee6e8bb 5525 break;
1dc8425e
RH
5526 default:
5527 g_assert_not_reached();
9ee6e8bb 5528 }
41f6c113 5529 if (op == 3) {
ad69471c 5530 /* Accumulate. */
5371cb81 5531 neon_load_reg64(cpu_V1, rd + pass);
ad69471c 5532 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5533 }
5534 neon_store_reg64(cpu_V0, rd + pass);
5535 } else { /* size < 3 */
5536 /* Operands in T0 and T1. */
dd8fbd78 5537 tmp = neon_load_reg(rm, pass);
7d1b0095 5538 tmp2 = tcg_temp_new_i32();
dd8fbd78 5539 tcg_gen_movi_i32(tmp2, imm);
ad69471c 5540 switch (op) {
ad69471c
PB
5541 case 2: /* VRSHR */
5542 case 3: /* VRSRA */
5543 GEN_NEON_INTEGER_OP(rshl);
5544 break;
0322b26e 5545 case 6: /* VQSHLU */
ad69471c 5546 switch (size) {
0322b26e 5547 case 0:
02da0b2d
PM
5548 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5549 tmp, tmp2);
0322b26e
PM
5550 break;
5551 case 1:
02da0b2d
PM
5552 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5553 tmp, tmp2);
0322b26e
PM
5554 break;
5555 case 2:
02da0b2d
PM
5556 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5557 tmp, tmp2);
0322b26e
PM
5558 break;
5559 default:
cc13115b 5560 abort();
ad69471c
PB
5561 }
5562 break;
0322b26e 5563 case 7: /* VQSHL */
02da0b2d 5564 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5565 break;
1dc8425e
RH
5566 default:
5567 g_assert_not_reached();
ad69471c 5568 }
7d1b0095 5569 tcg_temp_free_i32(tmp2);
ad69471c 5570
41f6c113 5571 if (op == 3) {
ad69471c 5572 /* Accumulate. */
dd8fbd78 5573 tmp2 = neon_load_reg(rd, pass);
5371cb81 5574 gen_neon_add(size, tmp, tmp2);
7d1b0095 5575 tcg_temp_free_i32(tmp2);
ad69471c 5576 }
dd8fbd78 5577 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5578 }
5579 } /* for pass */
5580 } else if (op < 10) {
ad69471c 5581 /* Shift by immediate and narrow:
9ee6e8bb 5582 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5583 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5584 if (rm & 1) {
5585 return 1;
5586 }
9ee6e8bb
PB
5587 shift = shift - (1 << (size + 3));
5588 size++;
92cdfaeb 5589 if (size == 3) {
a7812ae4 5590 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5591 neon_load_reg64(cpu_V0, rm);
5592 neon_load_reg64(cpu_V1, rm + 1);
5593 for (pass = 0; pass < 2; pass++) {
5594 TCGv_i64 in;
5595 if (pass == 0) {
5596 in = cpu_V0;
5597 } else {
5598 in = cpu_V1;
5599 }
ad69471c 5600 if (q) {
0b36f4cd 5601 if (input_unsigned) {
92cdfaeb 5602 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5603 } else {
92cdfaeb 5604 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5605 }
ad69471c 5606 } else {
0b36f4cd 5607 if (input_unsigned) {
92cdfaeb 5608 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5609 } else {
92cdfaeb 5610 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5611 }
ad69471c 5612 }
7d1b0095 5613 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5614 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5615 neon_store_reg(rd, pass, tmp);
5616 } /* for pass */
5617 tcg_temp_free_i64(tmp64);
5618 } else {
5619 if (size == 1) {
5620 imm = (uint16_t)shift;
5621 imm |= imm << 16;
2c0262af 5622 } else {
92cdfaeb
PM
5623 /* size == 2 */
5624 imm = (uint32_t)shift;
5625 }
5626 tmp2 = tcg_const_i32(imm);
5627 tmp4 = neon_load_reg(rm + 1, 0);
5628 tmp5 = neon_load_reg(rm + 1, 1);
5629 for (pass = 0; pass < 2; pass++) {
5630 if (pass == 0) {
5631 tmp = neon_load_reg(rm, 0);
5632 } else {
5633 tmp = tmp4;
5634 }
0b36f4cd
CL
5635 gen_neon_shift_narrow(size, tmp, tmp2, q,
5636 input_unsigned);
92cdfaeb
PM
5637 if (pass == 0) {
5638 tmp3 = neon_load_reg(rm, 1);
5639 } else {
5640 tmp3 = tmp5;
5641 }
0b36f4cd
CL
5642 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5643 input_unsigned);
36aa55dc 5644 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5645 tcg_temp_free_i32(tmp);
5646 tcg_temp_free_i32(tmp3);
5647 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5648 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5649 neon_store_reg(rd, pass, tmp);
5650 } /* for pass */
c6067f04 5651 tcg_temp_free_i32(tmp2);
b75263d6 5652 }
        } else if (op == 10) {
            /* VSHLL, VMOVL */
            if (q || (rd & 1)) {
                return 1;
            }
            tmp = neon_load_reg(rm, 0);
            tmp2 = neon_load_reg(rm, 1);
            for (pass = 0; pass < 2; pass++) {
                if (pass == 1)
                    tmp = tmp2;

                gen_neon_widen(cpu_V0, tmp, size, u);

                if (shift != 0) {
                    /* The shift is less than the width of the source
                       type, so we can just shift the whole register.  */
                    tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                    /* Widen the result of the shift: we need to clear
                     * the potential overflow bits resulting from left
                     * bits of the narrow input appearing as right bits
                     * of the left-neighbour narrow input.
                     */
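                    /* Added example: with size == 0, !u and shift == 3,
                     * each byte is sign-extended to 16 bits and the whole
                     * register shifted left by 3, so the low 3 bits of
                     * each 16-bit lane hold sign bits leaked from the
                     * lane below; imm = 0xff >> (8 - 3) = 0x07 replicated
                     * into every lane is exactly the set of bits to clear.
                     */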
5675 if (size < 2 || !u) {
5676 uint64_t imm64;
5677 if (size == 0) {
5678 imm = (0xffu >> (8 - shift));
5679 imm |= imm << 16;
acdf01ef 5680 } else if (size == 1) {
ad69471c 5681 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5682 } else {
5683 /* size == 2 */
5684 imm = 0xffffffff >> (32 - shift);
5685 }
5686 if (size < 2) {
5687 imm64 = imm | (((uint64_t)imm) << 32);
5688 } else {
5689 imm64 = imm;
9ee6e8bb 5690 }
acdf01ef 5691 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5692 }
5693 }
ad69471c 5694 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5695 }
f73534a5 5696 } else if (op >= 14) {
9ee6e8bb 5697 /* VCVT fixed-point. */
c253dd78
PM
5698 TCGv_ptr fpst;
5699 TCGv_i32 shiftv;
5700 VFPGenFixPointFn *fn;
5701
cc13115b
PM
5702 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5703 return 1;
5704 }
c253dd78
PM
5705
5706 if (!(op & 1)) {
5707 if (u) {
5708 fn = gen_helper_vfp_ultos;
5709 } else {
5710 fn = gen_helper_vfp_sltos;
5711 }
5712 } else {
5713 if (u) {
5714 fn = gen_helper_vfp_touls_round_to_zero;
5715 } else {
5716 fn = gen_helper_vfp_tosls_round_to_zero;
5717 }
5718 }
5719
            /* We have already masked out the must-be-1 top bit of imm6,
             * hence this 32-shift where the ARM ARM has 64-imm6.
             */
            shift = 32 - shift;
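            /* Commentary: the result is the number of fraction bits
             * (1..32); e.g. imm6 = 0b110000 (48) encodes 64 - 48 = 16
             * fraction bits, and with the top bit already masked off
             * this computes 32 - 16 = 16.
             */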
c253dd78
PM
5724 fpst = get_fpstatus_ptr(1);
5725 shiftv = tcg_const_i32(shift);
9ee6e8bb 5726 for (pass = 0; pass < (q ? 4 : 2); pass++) {
c253dd78
PM
5727 TCGv_i32 tmpf = neon_load_reg(rm, pass);
5728 fn(tmpf, tmpf, shiftv, fpst);
5729 neon_store_reg(rd, pass, tmpf);
2c0262af 5730 }
c253dd78
PM
5731 tcg_temp_free_ptr(fpst);
5732 tcg_temp_free_i32(shiftv);
2c0262af 5733 } else {
9ee6e8bb
PB
5734 return 1;
5735 }
5736 } else { /* (insn & 0x00380080) == 0 */
246fa4ac
RH
5737 int invert, reg_ofs, vec_size;
5738
7d80fee5
PM
5739 if (q && (rd & 1)) {
5740 return 1;
5741 }
9ee6e8bb
PB
5742
5743 op = (insn >> 8) & 0xf;
5744 /* One register and immediate. */
5745 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5746 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5747 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5748 * We choose to not special-case this and will behave as if a
5749 * valid constant encoding of 0 had been given.
5750 */
9ee6e8bb
PB
5751 switch (op) {
5752 case 0: case 1:
5753 /* no-op */
5754 break;
5755 case 2: case 3:
5756 imm <<= 8;
5757 break;
5758 case 4: case 5:
5759 imm <<= 16;
5760 break;
5761 case 6: case 7:
5762 imm <<= 24;
5763 break;
5764 case 8: case 9:
5765 imm |= imm << 16;
5766 break;
5767 case 10: case 11:
5768 imm = (imm << 8) | (imm << 24);
5769 break;
5770 case 12:
8e31209e 5771 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5772 break;
5773 case 13:
5774 imm = (imm << 16) | 0xffff;
5775 break;
5776 case 14:
5777 imm |= (imm << 8) | (imm << 16) | (imm << 24);
246fa4ac 5778 if (invert) {
9ee6e8bb 5779 imm = ~imm;
246fa4ac 5780 }
9ee6e8bb
PB
5781 break;
            case 15:
                if (invert) {
                    return 1;
                }
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
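            /* Commentary on case 15 above: this is the standard VFP
             * 8-bit float immediate expansion, abcdefgh ->
             * a:NOT(b):bbbbb:cdefgh followed by 19 zero bits, giving a
             * single-precision value; e.g. 0x70 expands to 1.0f.
             */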
246fa4ac 5790 if (invert) {
9ee6e8bb 5791 imm = ~imm;
246fa4ac 5792 }
9ee6e8bb 5793
246fa4ac
RH
5794 reg_ofs = neon_reg_offset(rd, 0);
5795 vec_size = q ? 16 : 8;
5796
5797 if (op & 1 && op < 12) {
5798 if (invert) {
5799 /* The immediate value has already been inverted,
5800 * so BIC becomes AND.
5801 */
5802 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
5803 vec_size, vec_size);
9ee6e8bb 5804 } else {
246fa4ac
RH
5805 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
5806 vec_size, vec_size);
5807 }
5808 } else {
5809 /* VMOV, VMVN. */
5810 if (op == 14 && invert) {
5811 TCGv_i64 t64 = tcg_temp_new_i64();
5812
5813 for (pass = 0; pass <= q; ++pass) {
5814 uint64_t val = 0;
a5a14945 5815 int n;
246fa4ac
RH
5816
5817 for (n = 0; n < 8; n++) {
5818 if (imm & (1 << (n + pass * 8))) {
5819 val |= 0xffull << (n * 8);
5820 }
9ee6e8bb 5821 }
246fa4ac
RH
5822 tcg_gen_movi_i64(t64, val);
5823 neon_store_reg64(t64, rd + pass);
9ee6e8bb 5824 }
246fa4ac
RH
5825 tcg_temp_free_i64(t64);
5826 } else {
5827 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
9ee6e8bb
PB
5828 }
5829 }
5830 }
e4b3861d 5831 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5832 if (size != 3) {
5833 op = (insn >> 8) & 0xf;
5834 if ((insn & (1 << 6)) == 0) {
5835 /* Three registers of different lengths. */
5836 int src1_wide;
5837 int src2_wide;
5838 int prewiden;
                /* undefreq: bit 0 : UNDEF if size == 0
                 *           bit 1 : UNDEF if size == 1
                 *           bit 2 : UNDEF if size == 2
                 *           bit 3 : UNDEF if U == 1
                 * Note that [2:0] set implies 'always UNDEF'
                 */
                int undefreq;
                /* prewiden, src1_wide, src2_wide, undefreq */
                static const int neon_3reg_wide[16][4] = {
                    {1, 0, 0, 0}, /* VADDL */
                    {1, 1, 0, 0}, /* VADDW */
                    {1, 0, 0, 0}, /* VSUBL */
                    {1, 1, 0, 0}, /* VSUBW */
                    {0, 1, 1, 0}, /* VADDHN */
                    {0, 0, 0, 0}, /* VABAL */
                    {0, 1, 1, 0}, /* VSUBHN */
                    {0, 0, 0, 0}, /* VABDL */
                    {0, 0, 0, 0}, /* VMLAL */
                    {0, 0, 0, 9}, /* VQDMLAL */
                    {0, 0, 0, 0}, /* VMLSL */
                    {0, 0, 0, 9}, /* VQDMLSL */
                    {0, 0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0, 1}, /* VQDMULL */
                    {0, 0, 0, 0xa}, /* Polynomial VMULL */
                    {0, 0, 0, 7}, /* Reserved: always UNDEF */
                };
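                /* Commentary: e.g. the VQDMLAL row {0, 0, 0, 9} sets
                 * undefreq bits 0 and 3, i.e. UNDEF if size == 0 or
                 * U == 1: the saturating-doubling ops have no byte-sized
                 * or unsigned forms.
                 */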
5865
5866 prewiden = neon_3reg_wide[op][0];
5867 src1_wide = neon_3reg_wide[op][1];
5868 src2_wide = neon_3reg_wide[op][2];
695272dc 5869 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5870
526d0096
PM
5871 if ((undefreq & (1 << size)) ||
5872 ((undefreq & 8) && u)) {
695272dc
PM
5873 return 1;
5874 }
5875 if ((src1_wide && (rn & 1)) ||
5876 (src2_wide && (rm & 1)) ||
5877 (!src2_wide && (rd & 1))) {
ad69471c 5878 return 1;
695272dc 5879 }
ad69471c 5880
4e624eda
PM
5881 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
5882 * outside the loop below as it only performs a single pass.
5883 */
5884 if (op == 14 && size == 2) {
5885 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
5886
962fcbf2 5887 if (!dc_isar_feature(aa32_pmull, s)) {
4e624eda
PM
5888 return 1;
5889 }
5890 tcg_rn = tcg_temp_new_i64();
5891 tcg_rm = tcg_temp_new_i64();
5892 tcg_rd = tcg_temp_new_i64();
5893 neon_load_reg64(tcg_rn, rn);
5894 neon_load_reg64(tcg_rm, rm);
5895 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
5896 neon_store_reg64(tcg_rd, rd);
5897 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
5898 neon_store_reg64(tcg_rd, rd + 1);
5899 tcg_temp_free_i64(tcg_rn);
5900 tcg_temp_free_i64(tcg_rm);
5901 tcg_temp_free_i64(tcg_rd);
5902 return 0;
5903 }
5904
9ee6e8bb
PB
5905 /* Avoid overlapping operands. Wide source operands are
5906 always aligned so will never overlap with wide
5907 destinations in problematic ways. */
8f8e3aa4 5908 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5909 tmp = neon_load_reg(rm, 1);
5910 neon_store_scratch(2, tmp);
8f8e3aa4 5911 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5912 tmp = neon_load_reg(rn, 1);
5913 neon_store_scratch(2, tmp);
9ee6e8bb 5914 }
f764718d 5915 tmp3 = NULL;
9ee6e8bb 5916 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5917 if (src1_wide) {
5918 neon_load_reg64(cpu_V0, rn + pass);
f764718d 5919 tmp = NULL;
9ee6e8bb 5920 } else {
ad69471c 5921 if (pass == 1 && rd == rn) {
dd8fbd78 5922 tmp = neon_load_scratch(2);
9ee6e8bb 5923 } else {
ad69471c
PB
5924 tmp = neon_load_reg(rn, pass);
5925 }
5926 if (prewiden) {
5927 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5928 }
5929 }
ad69471c
PB
5930 if (src2_wide) {
5931 neon_load_reg64(cpu_V1, rm + pass);
f764718d 5932 tmp2 = NULL;
9ee6e8bb 5933 } else {
ad69471c 5934 if (pass == 1 && rd == rm) {
dd8fbd78 5935 tmp2 = neon_load_scratch(2);
9ee6e8bb 5936 } else {
ad69471c
PB
5937 tmp2 = neon_load_reg(rm, pass);
5938 }
5939 if (prewiden) {
5940 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5941 }
9ee6e8bb
PB
5942 }
5943 switch (op) {
5944 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5945 gen_neon_addl(size);
9ee6e8bb 5946 break;
79b0e534 5947 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5948 gen_neon_subl(size);
9ee6e8bb
PB
5949 break;
5950 case 5: case 7: /* VABAL, VABDL */
5951 switch ((size << 1) | u) {
ad69471c
PB
5952 case 0:
5953 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5954 break;
5955 case 1:
5956 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5957 break;
5958 case 2:
5959 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5960 break;
5961 case 3:
5962 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5963 break;
5964 case 4:
5965 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5966 break;
5967 case 5:
5968 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5969 break;
9ee6e8bb
PB
5970 default: abort();
5971 }
7d1b0095
PM
5972 tcg_temp_free_i32(tmp2);
5973 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5974 break;
5975 case 8: case 9: case 10: case 11: case 12: case 13:
5976 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5977 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5978 break;
5979 case 14: /* Polynomial VMULL */
e5ca24cb 5980 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5981 tcg_temp_free_i32(tmp2);
5982 tcg_temp_free_i32(tmp);
e5ca24cb 5983 break;
695272dc
PM
5984 default: /* 15 is RESERVED: caught earlier */
5985 abort();
9ee6e8bb 5986 }
ebcd88ce
PM
5987 if (op == 13) {
5988 /* VQDMULL */
5989 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5990 neon_store_reg64(cpu_V0, rd + pass);
5991 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5992 /* Accumulate. */
ebcd88ce 5993 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5994 switch (op) {
4dc064e6
PM
5995 case 10: /* VMLSL */
5996 gen_neon_negl(cpu_V0, size);
5997 /* Fall through */
5998 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5999 gen_neon_addl(size);
9ee6e8bb
PB
6000 break;
6001 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6002 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6003 if (op == 11) {
6004 gen_neon_negl(cpu_V0, size);
6005 }
ad69471c
PB
6006 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6007 break;
9ee6e8bb
PB
6008 default:
6009 abort();
6010 }
ad69471c 6011 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6012 } else if (op == 4 || op == 6) {
6013 /* Narrowing operation. */
7d1b0095 6014 tmp = tcg_temp_new_i32();
79b0e534 6015 if (!u) {
9ee6e8bb 6016 switch (size) {
ad69471c
PB
6017 case 0:
6018 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6019 break;
6020 case 1:
6021 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6022 break;
6023 case 2:
6024 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6025 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6026 break;
9ee6e8bb
PB
6027 default: abort();
6028 }
6029 } else {
6030 switch (size) {
ad69471c
PB
6031 case 0:
6032 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6033 break;
6034 case 1:
6035 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6036 break;
6037 case 2:
6038 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6039 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6040 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6041 break;
9ee6e8bb
PB
6042 default: abort();
6043 }
6044 }
ad69471c
PB
6045 if (pass == 0) {
6046 tmp3 = tmp;
6047 } else {
6048 neon_store_reg(rd, 0, tmp3);
6049 neon_store_reg(rd, 1, tmp);
6050 }
9ee6e8bb
PB
6051 } else {
6052 /* Write back the result. */
ad69471c 6053 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6054 }
6055 }
6056 } else {
3e3326df
PM
6057 /* Two registers and a scalar. NB that for ops of this form
6058 * the ARM ARM labels bit 24 as Q, but it is in our variable
6059 * 'u', not 'q'.
6060 */
6061 if (size == 0) {
6062 return 1;
6063 }
9ee6e8bb 6064 switch (op) {
9ee6e8bb 6065 case 1: /* Float VMLA scalar */
9ee6e8bb 6066 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6067 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6068 if (size == 1) {
6069 return 1;
6070 }
6071 /* fall through */
6072 case 0: /* Integer VMLA scalar */
6073 case 4: /* Integer VMLS scalar */
6074 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6075 case 12: /* VQDMULH scalar */
6076 case 13: /* VQRDMULH scalar */
3e3326df
PM
6077 if (u && ((rd | rn) & 1)) {
6078 return 1;
6079 }
dd8fbd78
FN
6080 tmp = neon_get_scalar(size, rm);
6081 neon_store_scratch(0, tmp);
9ee6e8bb 6082 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6083 tmp = neon_load_scratch(0);
6084 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6085 if (op == 12) {
6086 if (size == 1) {
02da0b2d 6087 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6088 } else {
02da0b2d 6089 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6090 }
6091 } else if (op == 13) {
6092 if (size == 1) {
02da0b2d 6093 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6094 } else {
02da0b2d 6095 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6096 }
6097 } else if (op & 1) {
aa47cfdd
PM
6098 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6099 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6100 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6101 } else {
6102 switch (size) {
dd8fbd78
FN
6103 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6104 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6105 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6106 default: abort();
9ee6e8bb
PB
6107 }
6108 }
7d1b0095 6109 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6110 if (op < 8) {
6111 /* Accumulate. */
dd8fbd78 6112 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6113 switch (op) {
6114 case 0:
dd8fbd78 6115 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6116 break;
6117 case 1:
aa47cfdd
PM
6118 {
6119 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6120 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6121 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6122 break;
aa47cfdd 6123 }
9ee6e8bb 6124 case 4:
dd8fbd78 6125 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6126 break;
6127 case 5:
aa47cfdd
PM
6128 {
6129 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6130 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6131 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6132 break;
aa47cfdd 6133 }
9ee6e8bb
PB
6134 default:
6135 abort();
6136 }
7d1b0095 6137 tcg_temp_free_i32(tmp2);
9ee6e8bb 6138 }
dd8fbd78 6139 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6140 }
6141 break;
            case 3: /* VQDMLAL scalar */
            case 7: /* VQDMLSL scalar */
            case 11: /* VQDMULL scalar */
                if (u == 1) {
                    return 1;
                }
                /* fall through */
            case 2: /* VMLAL scalar */
            case 6: /* VMLSL scalar */
            case 10: /* VMULL scalar */
                if (rd & 1) {
                    return 1;
                }
                tmp2 = neon_get_scalar(size, rm);
                /* We need a copy of tmp2 because gen_neon_mull
                 * frees it during pass 0.  */
                tmp4 = tcg_temp_new_i32();
                tcg_gen_mov_i32(tmp4, tmp2);
                tmp3 = neon_load_reg(rn, 1);
dd8fbd78 6160 tmp3 = neon_load_reg(rn, 1);
ad69471c 6161
9ee6e8bb 6162 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6163 if (pass == 0) {
6164 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6165 } else {
dd8fbd78 6166 tmp = tmp3;
c6067f04 6167 tmp2 = tmp4;
9ee6e8bb 6168 }
ad69471c 6169 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6170 if (op != 11) {
6171 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6172 }
9ee6e8bb 6173 switch (op) {
4dc064e6
PM
6174 case 6:
6175 gen_neon_negl(cpu_V0, size);
6176 /* Fall through */
6177 case 2:
ad69471c 6178 gen_neon_addl(size);
9ee6e8bb
PB
6179 break;
6180 case 3: case 7:
ad69471c 6181 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6182 if (op == 7) {
6183 gen_neon_negl(cpu_V0, size);
6184 }
ad69471c 6185 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6186 break;
6187 case 10:
6188 /* no-op */
6189 break;
6190 case 11:
ad69471c 6191 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6192 break;
6193 default:
6194 abort();
6195 }
ad69471c 6196 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6197 }
61adacc8
RH
6198 break;
6199 case 14: /* VQRDMLAH scalar */
6200 case 15: /* VQRDMLSH scalar */
6201 {
6202 NeonGenThreeOpEnvFn *fn;
dd8fbd78 6203
962fcbf2 6204 if (!dc_isar_feature(aa32_rdm, s)) {
61adacc8
RH
6205 return 1;
6206 }
6207 if (u && ((rd | rn) & 1)) {
6208 return 1;
6209 }
6210 if (op == 14) {
6211 if (size == 1) {
6212 fn = gen_helper_neon_qrdmlah_s16;
6213 } else {
6214 fn = gen_helper_neon_qrdmlah_s32;
6215 }
6216 } else {
6217 if (size == 1) {
6218 fn = gen_helper_neon_qrdmlsh_s16;
6219 } else {
6220 fn = gen_helper_neon_qrdmlsh_s32;
6221 }
6222 }
dd8fbd78 6223
61adacc8
RH
6224 tmp2 = neon_get_scalar(size, rm);
6225 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6226 tmp = neon_load_reg(rn, pass);
6227 tmp3 = neon_load_reg(rd, pass);
6228 fn(tmp, cpu_env, tmp, tmp2, tmp3);
6229 tcg_temp_free_i32(tmp3);
6230 neon_store_reg(rd, pass, tmp);
6231 }
6232 tcg_temp_free_i32(tmp2);
6233 }
9ee6e8bb 6234 break;
61adacc8
RH
6235 default:
6236 g_assert_not_reached();
9ee6e8bb
PB
6237 }
6238 }
        } else { /* size == 3 */
            if (!u) {
                /* Extract.  */
                imm = (insn >> 8) & 0xf;
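                /* Commentary: VEXT treats Vm:Vn as one double-width value
                 * (Vn at the least significant end) and copies the window
                 * starting imm bytes up into Vd; the cases below open-code
                 * this as 64-bit shift/OR sequences.
                 */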
6243
6244 if (imm > 7 && !q)
6245 return 1;
6246
52579ea1
PM
6247 if (q && ((rd | rn | rm) & 1)) {
6248 return 1;
6249 }
6250
ad69471c
PB
6251 if (imm == 0) {
6252 neon_load_reg64(cpu_V0, rn);
6253 if (q) {
6254 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6255 }
ad69471c
PB
6256 } else if (imm == 8) {
6257 neon_load_reg64(cpu_V0, rn + 1);
6258 if (q) {
6259 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6260 }
ad69471c 6261 } else if (q) {
a7812ae4 6262 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6263 if (imm < 8) {
6264 neon_load_reg64(cpu_V0, rn);
a7812ae4 6265 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6266 } else {
6267 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6268 neon_load_reg64(tmp64, rm);
ad69471c
PB
6269 }
6270 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6271 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6272 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6273 if (imm < 8) {
6274 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6275 } else {
ad69471c
PB
6276 neon_load_reg64(cpu_V1, rm + 1);
6277 imm -= 8;
9ee6e8bb 6278 }
ad69471c 6279 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6280 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6281 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6282 tcg_temp_free_i64(tmp64);
ad69471c 6283 } else {
a7812ae4 6284 /* BUGFIX */
ad69471c 6285 neon_load_reg64(cpu_V0, rn);
a7812ae4 6286 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6287 neon_load_reg64(cpu_V1, rm);
a7812ae4 6288 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6289 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6290 }
6291 neon_store_reg64(cpu_V0, rd);
6292 if (q) {
6293 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6294 }
6295 } else if ((insn & (1 << 11)) == 0) {
6296 /* Two register misc. */
6297 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6298 size = (insn >> 18) & 3;
600b828c
PM
6299 /* UNDEF for unknown op values and bad op-size combinations */
6300 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6301 return 1;
6302 }
fe8fcf3d
PM
6303 if (neon_2rm_is_v8_op(op) &&
6304 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6305 return 1;
6306 }
fc2a9b37
PM
6307 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6308 q && ((rm | rd) & 1)) {
6309 return 1;
6310 }
9ee6e8bb 6311 switch (op) {
600b828c 6312 case NEON_2RM_VREV64:
9ee6e8bb 6313 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6314 tmp = neon_load_reg(rm, pass * 2);
6315 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6316 switch (size) {
dd8fbd78
FN
6317 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6318 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6319 case 2: /* no-op */ break;
6320 default: abort();
6321 }
dd8fbd78 6322 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6323 if (size == 2) {
dd8fbd78 6324 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6325 } else {
9ee6e8bb 6326 switch (size) {
dd8fbd78
FN
6327 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6328 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6329 default: abort();
6330 }
dd8fbd78 6331 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6332 }
6333 }
6334 break;
600b828c
PM
6335 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6336 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6337 for (pass = 0; pass < q + 1; pass++) {
6338 tmp = neon_load_reg(rm, pass * 2);
6339 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6340 tmp = neon_load_reg(rm, pass * 2 + 1);
6341 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6342 switch (size) {
6343 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6344 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6345 case 2: tcg_gen_add_i64(CPU_V001); break;
6346 default: abort();
6347 }
600b828c 6348 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6349 /* Accumulate. */
ad69471c
PB
6350 neon_load_reg64(cpu_V1, rd + pass);
6351 gen_neon_addl(size);
9ee6e8bb 6352 }
ad69471c 6353 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6354 }
6355 break;
600b828c 6356 case NEON_2RM_VTRN:
9ee6e8bb 6357 if (size == 2) {
a5a14945 6358 int n;
9ee6e8bb 6359 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6360 tmp = neon_load_reg(rm, n);
6361 tmp2 = neon_load_reg(rd, n + 1);
6362 neon_store_reg(rm, n, tmp2);
6363 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6364 }
6365 } else {
6366 goto elementwise;
6367 }
6368 break;
600b828c 6369 case NEON_2RM_VUZP:
02acedf9 6370 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6371 return 1;
9ee6e8bb
PB
6372 }
6373 break;
600b828c 6374 case NEON_2RM_VZIP:
d68a6f3a 6375 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6376 return 1;
9ee6e8bb
PB
6377 }
6378 break;
600b828c
PM
6379 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6380 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6381 if (rm & 1) {
6382 return 1;
6383 }
f764718d 6384 tmp2 = NULL;
9ee6e8bb 6385 for (pass = 0; pass < 2; pass++) {
ad69471c 6386 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6387 tmp = tcg_temp_new_i32();
600b828c
PM
6388 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6389 tmp, cpu_V0);
ad69471c
PB
6390 if (pass == 0) {
6391 tmp2 = tmp;
6392 } else {
6393 neon_store_reg(rd, 0, tmp2);
6394 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6395 }
9ee6e8bb
PB
6396 }
6397 break;
600b828c 6398 case NEON_2RM_VSHLL:
fc2a9b37 6399 if (q || (rd & 1)) {
9ee6e8bb 6400 return 1;
600b828c 6401 }
ad69471c
PB
6402 tmp = neon_load_reg(rm, 0);
6403 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6404 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6405 if (pass == 1)
6406 tmp = tmp2;
6407 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6408 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6409 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6410 }
6411 break;
600b828c 6412 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
6413 {
6414 TCGv_ptr fpst;
6415 TCGv_i32 ahp;
6416
602f6e42 6417 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
6418 q || (rm & 1)) {
6419 return 1;
6420 }
486624fc
AB
6421 fpst = get_fpstatus_ptr(true);
6422 ahp = get_ahp_flag();
58f2682e
PM
6423 tmp = neon_load_reg(rm, 0);
6424 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6425 tmp2 = neon_load_reg(rm, 1);
6426 gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
60011498
PB
6427 tcg_gen_shli_i32(tmp2, tmp2, 16);
6428 tcg_gen_or_i32(tmp2, tmp2, tmp);
58f2682e
PM
6429 tcg_temp_free_i32(tmp);
6430 tmp = neon_load_reg(rm, 2);
6431 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6432 tmp3 = neon_load_reg(rm, 3);
60011498 6433 neon_store_reg(rd, 0, tmp2);
58f2682e
PM
6434 gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
6435 tcg_gen_shli_i32(tmp3, tmp3, 16);
6436 tcg_gen_or_i32(tmp3, tmp3, tmp);
6437 neon_store_reg(rd, 1, tmp3);
7d1b0095 6438 tcg_temp_free_i32(tmp);
486624fc
AB
6439 tcg_temp_free_i32(ahp);
6440 tcg_temp_free_ptr(fpst);
60011498 6441 break;
486624fc 6442 }
600b828c 6443 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
6444 {
6445 TCGv_ptr fpst;
6446 TCGv_i32 ahp;
602f6e42 6447 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
6448 q || (rd & 1)) {
6449 return 1;
6450 }
486624fc
AB
6451 fpst = get_fpstatus_ptr(true);
6452 ahp = get_ahp_flag();
7d1b0095 6453 tmp3 = tcg_temp_new_i32();
60011498
PB
6454 tmp = neon_load_reg(rm, 0);
6455 tmp2 = neon_load_reg(rm, 1);
6456 tcg_gen_ext16u_i32(tmp3, tmp);
b66f6b99
PM
6457 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6458 neon_store_reg(rd, 0, tmp3);
6459 tcg_gen_shri_i32(tmp, tmp, 16);
6460 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
6461 neon_store_reg(rd, 1, tmp);
6462 tmp3 = tcg_temp_new_i32();
60011498 6463 tcg_gen_ext16u_i32(tmp3, tmp2);
b66f6b99
PM
6464 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6465 neon_store_reg(rd, 2, tmp3);
6466 tcg_gen_shri_i32(tmp2, tmp2, 16);
6467 gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
6468 neon_store_reg(rd, 3, tmp2);
486624fc
AB
6469 tcg_temp_free_i32(ahp);
6470 tcg_temp_free_ptr(fpst);
60011498 6471 break;
486624fc 6472 }
9d935509 6473 case NEON_2RM_AESE: case NEON_2RM_AESMC:
962fcbf2 6474 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
9d935509
AB
6475 return 1;
6476 }
1a66ac61
RH
6477 ptr1 = vfp_reg_ptr(true, rd);
6478 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
6479
6480 /* Bit 6 is the lowest opcode bit; it distinguishes between
6481 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6482 */
6483 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6484
6485 if (op == NEON_2RM_AESE) {
1a66ac61 6486 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 6487 } else {
1a66ac61 6488 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 6489 }
1a66ac61
RH
6490 tcg_temp_free_ptr(ptr1);
6491 tcg_temp_free_ptr(ptr2);
9d935509
AB
6492 tcg_temp_free_i32(tmp3);
6493 break;
f1ecb913 6494 case NEON_2RM_SHA1H:
962fcbf2 6495 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
f1ecb913
AB
6496 return 1;
6497 }
1a66ac61
RH
6498 ptr1 = vfp_reg_ptr(true, rd);
6499 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 6500
1a66ac61 6501 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 6502
1a66ac61
RH
6503 tcg_temp_free_ptr(ptr1);
6504 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
6505 break;
6506 case NEON_2RM_SHA1SU1:
6507 if ((rm | rd) & 1) {
6508 return 1;
6509 }
6510 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6511 if (q) {
962fcbf2 6512 if (!dc_isar_feature(aa32_sha2, s)) {
f1ecb913
AB
6513 return 1;
6514 }
962fcbf2 6515 } else if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
6516 return 1;
6517 }
1a66ac61
RH
6518 ptr1 = vfp_reg_ptr(true, rd);
6519 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 6520 if (q) {
1a66ac61 6521 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 6522 } else {
1a66ac61 6523 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 6524 }
1a66ac61
RH
6525 tcg_temp_free_ptr(ptr1);
6526 tcg_temp_free_ptr(ptr2);
f1ecb913 6527 break;
4bf940be
RH
6528
6529 case NEON_2RM_VMVN:
6530 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
6531 break;
6532 case NEON_2RM_VNEG:
6533 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
6534 break;
4e027a71
RH
6535 case NEON_2RM_VABS:
6536 tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
6537 break;
4bf940be 6538
9ee6e8bb
PB
6539 default:
6540 elementwise:
6541 for (pass = 0; pass < (q ? 4 : 2); pass++) {
60737ed5 6542 tmp = neon_load_reg(rm, pass);
9ee6e8bb 6543 switch (op) {
600b828c 6544 case NEON_2RM_VREV32:
9ee6e8bb 6545 switch (size) {
dd8fbd78
FN
6546 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6547 case 1: gen_swap_half(tmp); break;
600b828c 6548 default: abort();
9ee6e8bb
PB
6549 }
6550 break;
600b828c 6551 case NEON_2RM_VREV16:
dd8fbd78 6552 gen_rev16(tmp);
9ee6e8bb 6553 break;
600b828c 6554 case NEON_2RM_VCLS:
9ee6e8bb 6555 switch (size) {
dd8fbd78
FN
6556 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6557 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6558 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6559 default: abort();
9ee6e8bb
PB
6560 }
6561 break;
600b828c 6562 case NEON_2RM_VCLZ:
9ee6e8bb 6563 switch (size) {
dd8fbd78
FN
6564 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6565 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 6566 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 6567 default: abort();
9ee6e8bb
PB
6568 }
6569 break;
600b828c 6570 case NEON_2RM_VCNT:
dd8fbd78 6571 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6572 break;
600b828c 6573 case NEON_2RM_VQABS:
9ee6e8bb 6574 switch (size) {
02da0b2d
PM
6575 case 0:
6576 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6577 break;
6578 case 1:
6579 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6580 break;
6581 case 2:
6582 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6583 break;
600b828c 6584 default: abort();
9ee6e8bb
PB
6585 }
6586 break;
600b828c 6587 case NEON_2RM_VQNEG:
9ee6e8bb 6588 switch (size) {
02da0b2d
PM
6589 case 0:
6590 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6591 break;
6592 case 1:
6593 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6594 break;
6595 case 2:
6596 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6597 break;
600b828c 6598 default: abort();
9ee6e8bb
PB
6599 }
6600 break;
600b828c 6601 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6602 tmp2 = tcg_const_i32(0);
9ee6e8bb 6603 switch(size) {
dd8fbd78
FN
6604 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6605 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6606 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6607 default: abort();
9ee6e8bb 6608 }
39d5492a 6609 tcg_temp_free_i32(tmp2);
600b828c 6610 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6611 tcg_gen_not_i32(tmp, tmp);
600b828c 6612 }
9ee6e8bb 6613 break;
600b828c 6614 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6615 tmp2 = tcg_const_i32(0);
9ee6e8bb 6616 switch(size) {
dd8fbd78
FN
6617 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6618 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6619 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6620 default: abort();
9ee6e8bb 6621 }
39d5492a 6622 tcg_temp_free_i32(tmp2);
600b828c 6623 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6624 tcg_gen_not_i32(tmp, tmp);
600b828c 6625 }
9ee6e8bb 6626 break;
600b828c 6627 case NEON_2RM_VCEQ0:
dd8fbd78 6628 tmp2 = tcg_const_i32(0);
9ee6e8bb 6629 switch(size) {
dd8fbd78
FN
6630 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6631 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6632 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6633 default: abort();
9ee6e8bb 6634 }
39d5492a 6635 tcg_temp_free_i32(tmp2);
9ee6e8bb 6636 break;
600b828c 6637 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6638 {
6639 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6640 tmp2 = tcg_const_i32(0);
aa47cfdd 6641 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6642 tcg_temp_free_i32(tmp2);
aa47cfdd 6643 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6644 break;
aa47cfdd 6645 }
600b828c 6646 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6647 {
6648 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6649 tmp2 = tcg_const_i32(0);
aa47cfdd 6650 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6651 tcg_temp_free_i32(tmp2);
aa47cfdd 6652 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6653 break;
aa47cfdd 6654 }
600b828c 6655 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6656 {
6657 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6658 tmp2 = tcg_const_i32(0);
aa47cfdd 6659 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6660 tcg_temp_free_i32(tmp2);
aa47cfdd 6661 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6662 break;
aa47cfdd 6663 }
600b828c 6664 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6665 {
6666 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6667 tmp2 = tcg_const_i32(0);
aa47cfdd 6668 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6669 tcg_temp_free_i32(tmp2);
aa47cfdd 6670 tcg_temp_free_ptr(fpstatus);
0e326109 6671 break;
aa47cfdd 6672 }
600b828c 6673 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6674 {
6675 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6676 tmp2 = tcg_const_i32(0);
aa47cfdd 6677 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6678 tcg_temp_free_i32(tmp2);
aa47cfdd 6679 tcg_temp_free_ptr(fpstatus);
0e326109 6680 break;
aa47cfdd 6681 }
600b828c 6682 case NEON_2RM_VABS_F:
fd8a68cd 6683 gen_helper_vfp_abss(tmp, tmp);
9ee6e8bb 6684 break;
600b828c 6685 case NEON_2RM_VNEG_F:
cedcc96f 6686 gen_helper_vfp_negs(tmp, tmp);
9ee6e8bb 6687 break;
600b828c 6688 case NEON_2RM_VSWP:
dd8fbd78
FN
6689 tmp2 = neon_load_reg(rd, pass);
6690 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6691 break;
600b828c 6692 case NEON_2RM_VTRN:
dd8fbd78 6693 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6694 switch (size) {
dd8fbd78
FN
6695 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6696 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6697 default: abort();
9ee6e8bb 6698 }
dd8fbd78 6699 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6700 break;
34f7b0a2
WN
6701 case NEON_2RM_VRINTN:
6702 case NEON_2RM_VRINTA:
6703 case NEON_2RM_VRINTM:
6704 case NEON_2RM_VRINTP:
6705 case NEON_2RM_VRINTZ:
6706 {
6707 TCGv_i32 tcg_rmode;
6708 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6709 int rmode;
6710
6711 if (op == NEON_2RM_VRINTZ) {
6712 rmode = FPROUNDING_ZERO;
6713 } else {
6714 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6715 }
6716
6717 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6718 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6719 cpu_env);
3b52ad1f 6720 gen_helper_rints(tmp, tmp, fpstatus);
34f7b0a2
WN
6721 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6722 cpu_env);
6723 tcg_temp_free_ptr(fpstatus);
6724 tcg_temp_free_i32(tcg_rmode);
6725 break;
6726 }
2ce70625
WN
6727 case NEON_2RM_VRINTX:
6728 {
6729 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
3b52ad1f 6730 gen_helper_rints_exact(tmp, tmp, fpstatus);
2ce70625
WN
6731 tcg_temp_free_ptr(fpstatus);
6732 break;
6733 }
901ad525
WN
6734 case NEON_2RM_VCVTAU:
6735 case NEON_2RM_VCVTAS:
6736 case NEON_2RM_VCVTNU:
6737 case NEON_2RM_VCVTNS:
6738 case NEON_2RM_VCVTPU:
6739 case NEON_2RM_VCVTPS:
6740 case NEON_2RM_VCVTMU:
6741 case NEON_2RM_VCVTMS:
6742 {
6743 bool is_signed = !extract32(insn, 7, 1);
6744 TCGv_ptr fpst = get_fpstatus_ptr(1);
6745 TCGv_i32 tcg_rmode, tcg_shift;
6746 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6747
6748 tcg_shift = tcg_const_i32(0);
6749 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6750 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6751 cpu_env);
6752
6753 if (is_signed) {
30bf0a01 6754 gen_helper_vfp_tosls(tmp, tmp,
901ad525
WN
6755 tcg_shift, fpst);
6756 } else {
30bf0a01 6757 gen_helper_vfp_touls(tmp, tmp,
901ad525
WN
6758 tcg_shift, fpst);
6759 }
6760
6761 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6762 cpu_env);
6763 tcg_temp_free_i32(tcg_rmode);
6764 tcg_temp_free_i32(tcg_shift);
6765 tcg_temp_free_ptr(fpst);
6766 break;
6767 }
600b828c 6768 case NEON_2RM_VRECPE:
b6d4443a
AB
6769 {
6770 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6771 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6772 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6773 break;
b6d4443a 6774 }
600b828c 6775 case NEON_2RM_VRSQRTE:
c2fb418e
AB
6776 {
6777 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6778 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6779 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6780 break;
c2fb418e 6781 }
600b828c 6782 case NEON_2RM_VRECPE_F:
b6d4443a
AB
6783 {
6784 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9a011fec 6785 gen_helper_recpe_f32(tmp, tmp, fpstatus);
b6d4443a 6786 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6787 break;
b6d4443a 6788 }
600b828c 6789 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
6790 {
6791 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9a011fec 6792 gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
c2fb418e 6793 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6794 break;
c2fb418e 6795 }
600b828c 6796 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
60737ed5
PM
6797 {
6798 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6799 gen_helper_vfp_sitos(tmp, tmp, fpstatus);
6800 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6801 break;
60737ed5 6802 }
600b828c 6803 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
60737ed5
PM
6804 {
6805 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6806 gen_helper_vfp_uitos(tmp, tmp, fpstatus);
6807 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6808 break;
60737ed5 6809 }
600b828c 6810 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
60737ed5
PM
6811 {
6812 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6813 gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
6814 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6815 break;
60737ed5 6816 }
600b828c 6817 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
60737ed5
PM
6818 {
6819 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6820 gen_helper_vfp_touizs(tmp, tmp, fpstatus);
6821 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6822 break;
60737ed5 6823 }
9ee6e8bb 6824 default:
600b828c
PM
6825 /* Reserved op values were caught by the
6826 * neon_2rm_sizes[] check earlier.
6827 */
6828 abort();
9ee6e8bb 6829 }
60737ed5 6830 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6831 }
6832 break;
6833 }
            } else if ((insn & (1 << 10)) == 0) {
                /* VTBL, VTBX.  */
                int n = ((insn >> 8) & 3) + 1;
                if ((rn + n) > 32) {
                    /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
                     * helper function running off the end of the register file.
                     */
                    return 1;
                }
                n <<= 3;
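                /* Commentary: n started as the number of list registers
                 * (1..4); after the shift it is the table length in bytes
                 * (8 per D register), which is the bound the helper
                 * compares indices against.
                 */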
9ee6e8bb 6844 if (insn & (1 << 6)) {
8f8e3aa4 6845 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6846 } else {
7d1b0095 6847 tmp = tcg_temp_new_i32();
8f8e3aa4 6848 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6849 }
8f8e3aa4 6850 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 6851 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 6852 tmp5 = tcg_const_i32(n);
e7c06c4e 6853 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 6854 tcg_temp_free_i32(tmp);
9ee6e8bb 6855 if (insn & (1 << 6)) {
8f8e3aa4 6856 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6857 } else {
7d1b0095 6858 tmp = tcg_temp_new_i32();
8f8e3aa4 6859 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6860 }
8f8e3aa4 6861 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 6862 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 6863 tcg_temp_free_i32(tmp5);
e7c06c4e 6864 tcg_temp_free_ptr(ptr1);
8f8e3aa4 6865 neon_store_reg(rd, 0, tmp2);
3018f259 6866 neon_store_reg(rd, 1, tmp3);
7d1b0095 6867 tcg_temp_free_i32(tmp);
            } else if ((insn & 0x380) == 0) {
                /* VDUP */
                int element;
                TCGMemOp size;

                if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
                    return 1;
                }
                if (insn & (1 << 16)) {
                    size = MO_8;
                    element = (insn >> 17) & 7;
                } else if (insn & (1 << 17)) {
                    size = MO_16;
                    element = (insn >> 18) & 3;
                } else {
                    size = MO_32;
                    element = (insn >> 19) & 1;
                }
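                /* Commentary: insn[19:16] encodes the scalar index as
                 * xxx1 -> byte, xx10 -> halfword, x100 -> word; the
                 * lowest set bit selects the size and the bits above it
                 * select the element, as decoded above.
                 */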
                tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
                                     neon_element_offset(rm, element, size),
                                     q ? 16 : 8, q ? 16 : 8);
9ee6e8bb
PB
6889 } else {
6890 return 1;
6891 }
6892 }
6893 }
6894 return 0;
6895}
6896
/* Advanced SIMD three registers of the same length extension.
 *  31 25 23 22 20 16 12 11 10 9 8 3 0
 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
 */
static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
{
26c470a7
RH
6905 gen_helper_gvec_3 *fn_gvec = NULL;
6906 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
6907 int rd, rn, rm, opr_sz;
6908 int data = 0;
87732318
RH
6909 int off_rn, off_rm;
6910 bool is_long = false, q = extract32(insn, 6, 1);
6911 bool ptr_is_env = false;
8b7209fa
RH
6912
6913 if ((insn & 0xfe200f10) == 0xfc200800) {
6914 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
6915 int size = extract32(insn, 20, 1);
6916 data = extract32(insn, 23, 2); /* rot */
962fcbf2 6917 if (!dc_isar_feature(aa32_vcma, s)
5763190f 6918 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
6919 return 1;
6920 }
6921 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
6922 } else if ((insn & 0xfea00f10) == 0xfc800800) {
6923 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
6924 int size = extract32(insn, 20, 1);
6925 data = extract32(insn, 24, 1); /* rot */
962fcbf2 6926 if (!dc_isar_feature(aa32_vcma, s)
5763190f 6927 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
6928 return 1;
6929 }
6930 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
6931 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
6932 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
6933 bool u = extract32(insn, 4, 1);
962fcbf2 6934 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
6935 return 1;
6936 }
6937 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
87732318
RH
6938 } else if ((insn & 0xff300f10) == 0xfc200810) {
6939 /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
6940 int is_s = extract32(insn, 23, 1);
6941 if (!dc_isar_feature(aa32_fhm, s)) {
6942 return 1;
6943 }
6944 is_long = true;
6945 data = is_s; /* is_2 == 0 */
6946 fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
6947 ptr_is_env = true;
8b7209fa
RH
6948 } else {
6949 return 1;
6950 }
6951
87732318
RH
6952 VFP_DREG_D(rd, insn);
6953 if (rd & q) {
6954 return 1;
6955 }
6956 if (q || !is_long) {
6957 VFP_DREG_N(rn, insn);
6958 VFP_DREG_M(rm, insn);
6959 if ((rn | rm) & q & !is_long) {
6960 return 1;
6961 }
6962 off_rn = vfp_reg_offset(1, rn);
6963 off_rm = vfp_reg_offset(1, rm);
6964 } else {
6965 rn = VFP_SREG_N(insn);
6966 rm = VFP_SREG_M(insn);
6967 off_rn = vfp_reg_offset(0, rn);
6968 off_rm = vfp_reg_offset(0, rm);
6969 }
6970
8b7209fa
RH
6971 if (s->fp_excp_el) {
6972 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 6973 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8b7209fa
RH
6974 return 0;
6975 }
6976 if (!s->vfp_enabled) {
6977 return 1;
6978 }
6979
6980 opr_sz = (1 + q) * 8;
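    /* Commentary: the gvec expansion below operates on the whole vector
     * at once; opr_sz is 8 bytes for a D register (q == 0) and 16 for a
     * Q register.
     */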
26c470a7 6981 if (fn_gvec_ptr) {
87732318
RH
6982 TCGv_ptr ptr;
6983 if (ptr_is_env) {
6984 ptr = cpu_env;
6985 } else {
6986 ptr = get_fpstatus_ptr(1);
6987 }
6988 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 6989 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
6990 if (!ptr_is_env) {
6991 tcg_temp_free_ptr(ptr);
6992 }
26c470a7 6993 } else {
87732318 6994 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
6995 opr_sz, opr_sz, data, fn_gvec);
6996 }
8b7209fa
RH
6997 return 0;
6998}
6999
638808ff
RH
7000/* Advanced SIMD two registers and a scalar extension.
7001 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7002 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7003 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7004 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7005 *
7006 */
7007
7008static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7009{
26c470a7
RH
7010 gen_helper_gvec_3 *fn_gvec = NULL;
7011 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
2cc99919 7012 int rd, rn, rm, opr_sz, data;
87732318
RH
7013 int off_rn, off_rm;
7014 bool is_long = false, q = extract32(insn, 6, 1);
7015 bool ptr_is_env = false;
638808ff
RH
7016
7017 if ((insn & 0xff000f10) == 0xfe000800) {
7018 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
2cc99919
RH
7019 int rot = extract32(insn, 20, 2);
7020 int size = extract32(insn, 23, 1);
7021 int index;
7022
962fcbf2 7023 if (!dc_isar_feature(aa32_vcma, s)) {
638808ff
RH
7024 return 1;
7025 }
2cc99919 7026 if (size == 0) {
5763190f 7027 if (!dc_isar_feature(aa32_fp16_arith, s)) {
2cc99919
RH
7028 return 1;
7029 }
7030 /* For fp16, rm is just Vm, and index is M. */
7031 rm = extract32(insn, 0, 4);
7032 index = extract32(insn, 5, 1);
7033 } else {
7034 /* For fp32, rm is the usual M:Vm, and index is 0. */
7035 VFP_DREG_M(rm, insn);
7036 index = 0;
7037 }
7038 data = (index << 2) | rot;
7039 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
7040 : gen_helper_gvec_fcmlah_idx);
26c470a7
RH
7041 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
7042 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
7043 int u = extract32(insn, 4, 1);
87732318 7044
962fcbf2 7045 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
7046 return 1;
7047 }
7048 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
7049 /* rm is just Vm, and index is M. */
7050 data = extract32(insn, 5, 1); /* index */
7051 rm = extract32(insn, 0, 4);
87732318
RH
7052 } else if ((insn & 0xffa00f10) == 0xfe000810) {
7053 /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
7054 int is_s = extract32(insn, 20, 1);
7055 int vm20 = extract32(insn, 0, 3);
7056 int vm3 = extract32(insn, 3, 1);
7057 int m = extract32(insn, 5, 1);
7058 int index;
7059
7060 if (!dc_isar_feature(aa32_fhm, s)) {
7061 return 1;
7062 }
7063 if (q) {
7064 rm = vm20;
7065 index = m * 2 + vm3;
7066 } else {
7067 rm = vm20 * 2 + m;
7068 index = vm3;
7069 }
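        /* Commentary: the fp16 scalar for VFMAL/VFMSL lives in the low
         * half of the register file, so the Q form picks one of four
         * half-precision lanes of D0-D7 (index = M:Vm3), while the D
         * form picks one of two lanes of S0-S15 (register = Vm<2:0>:M,
         * index = Vm3).
         */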
7070 is_long = true;
7071 data = (index << 2) | is_s; /* is_2 == 0 */
7072 fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
7073 ptr_is_env = true;
638808ff
RH
7074 } else {
7075 return 1;
7076 }
7077
87732318
RH
7078 VFP_DREG_D(rd, insn);
7079 if (rd & q) {
7080 return 1;
7081 }
7082 if (q || !is_long) {
7083 VFP_DREG_N(rn, insn);
7084 if (rn & q & !is_long) {
7085 return 1;
7086 }
7087 off_rn = vfp_reg_offset(1, rn);
7088 off_rm = vfp_reg_offset(1, rm);
7089 } else {
7090 rn = VFP_SREG_N(insn);
7091 off_rn = vfp_reg_offset(0, rn);
7092 off_rm = vfp_reg_offset(0, rm);
7093 }
638808ff
RH
7094 if (s->fp_excp_el) {
7095 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 7096 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
638808ff
RH
7097 return 0;
7098 }
7099 if (!s->vfp_enabled) {
7100 return 1;
7101 }
7102
7103 opr_sz = (1 + q) * 8;
26c470a7 7104 if (fn_gvec_ptr) {
87732318
RH
7105 TCGv_ptr ptr;
7106 if (ptr_is_env) {
7107 ptr = cpu_env;
7108 } else {
7109 ptr = get_fpstatus_ptr(1);
7110 }
7111 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 7112 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
7113 if (!ptr_is_env) {
7114 tcg_temp_free_ptr(ptr);
7115 }
26c470a7 7116 } else {
87732318 7117 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
7118 opr_sz, opr_sz, data, fn_gvec);
7119 }
638808ff
RH
7120 return 0;
7121}
7122
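/*
 * disas_coproc_insn() below decodes the generic AArch32 coprocessor space
 * (MRC/MCR/MRRC/MCRR and friends): XScale/iwMMXt coprocessors are handled
 * first, then the access is looked up in the cp_regs hashtable via
 * ENCODE_CP_REG(), an optional run-time access check is emitted, and
 * finally the actual register read or write is generated.
 */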
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
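
/*
 * The helpers below implement the common AArch32 convention of passing a
 * 64-bit value in a pair of 32-bit core registers (e.g. the UMULL/UMLAL
 * style accumulators): rlow holds bits [31:0] and rhigh bits [63:32].
 */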


/* Store a 64-bit value to a register pair.  Clobbers val. */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}

/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp2;

    /* Load value and extend to 64 bits. */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv_i32 tmpl;
    TCGv_i32 tmph;

    /* Load 64-bit value rd:rn. */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* Set N and Z flags from hi|lo. */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
7434 the cmpxchg operation, but we must compare the addresses manually. */
426f5abc 7435static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 7436 TCGv_i32 addr, int size)
426f5abc 7437{
94ee24e7 7438 TCGv_i32 tmp = tcg_temp_new_i32();
354161b3 7439 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 7440
50225ad0
PM
7441 s->is_ldex = true;
7442
426f5abc 7443 if (size == 3) {
39d5492a 7444 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 7445 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 7446
3448d47b
PM
7447 /* For AArch32, architecturally the 32-bit word at the lowest
7448 * address is always Rt and the one at addr+4 is Rt2, even if
7449 * the CPU is big-endian. That means we don't want to do a
7450 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
7451 * for an architecturally 64-bit access, but instead do a
7452 * 64-bit access using MO_BE if appropriate and then split
7453 * the two halves.
7454 * This only makes a difference for BE32 user-mode, where
7455 * frob64() must not flip the two halves of the 64-bit data
7456 * but this code must treat BE32 user-mode like BE32 system.
7457 */
7458 TCGv taddr = gen_aa32_addr(s, addr, opc);
7459
7460 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
7461 tcg_temp_free(taddr);
354161b3 7462 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3448d47b
PM
7463 if (s->be_data == MO_BE) {
7464 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
7465 } else {
7466 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
7467 }
354161b3
EC
7468 tcg_temp_free_i64(t64);
7469
7470 store_reg(s, rt2, tmp2);
03d05e2d 7471 } else {
354161b3 7472 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 7473 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 7474 }
03d05e2d
PM
7475
7476 store_reg(s, rt, tmp);
7477 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
7478}
7479
7480static void gen_clrex(DisasContext *s)
7481{
03d05e2d 7482 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7483}
7484
426f5abc 7485static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7486 TCGv_i32 addr, int size)
426f5abc 7487{
354161b3
EC
7488 TCGv_i32 t0, t1, t2;
7489 TCGv_i64 extaddr;
7490 TCGv taddr;
42a268c2
RH
7491 TCGLabel *done_label;
7492 TCGLabel *fail_label;
354161b3 7493 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
7494
7495 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7496 [addr] = {Rt};
7497 {Rd} = 0;
7498 } else {
7499 {Rd} = 1;
7500 } */
7501 fail_label = gen_new_label();
7502 done_label = gen_new_label();
03d05e2d
PM
7503 extaddr = tcg_temp_new_i64();
7504 tcg_gen_extu_i32_i64(extaddr, addr);
7505 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7506 tcg_temp_free_i64(extaddr);
7507
354161b3
EC
7508 taddr = gen_aa32_addr(s, addr, opc);
7509 t0 = tcg_temp_new_i32();
7510 t1 = load_reg(s, rt);
426f5abc 7511 if (size == 3) {
354161b3
EC
7512 TCGv_i64 o64 = tcg_temp_new_i64();
7513 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 7514
354161b3 7515 t2 = load_reg(s, rt2);
3448d47b
PM
7516 /* For AArch32, architecturally the 32-bit word at the lowest
7517 * address is always Rt and the one at addr+4 is Rt2, even if
7518 * the CPU is big-endian. Since we're going to treat this as a
7519 * single 64-bit BE store, we need to put the two halves in the
7520 * opposite order for BE to LE, so that they end up in the right
7521 * places.
7522 * We don't want gen_aa32_frob64() because that does the wrong
7523 * thing for BE32 usermode.
7524 */
7525 if (s->be_data == MO_BE) {
7526 tcg_gen_concat_i32_i64(n64, t2, t1);
7527 } else {
7528 tcg_gen_concat_i32_i64(n64, t1, t2);
7529 }
354161b3 7530 tcg_temp_free_i32(t2);
03d05e2d 7531
354161b3
EC
7532 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
7533 get_mem_index(s), opc);
7534 tcg_temp_free_i64(n64);
7535
354161b3
EC
7536 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
7537 tcg_gen_extrl_i64_i32(t0, o64);
7538
7539 tcg_temp_free_i64(o64);
7540 } else {
7541 t2 = tcg_temp_new_i32();
7542 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
7543 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
7544 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
7545 tcg_temp_free_i32(t2);
426f5abc 7546 }
354161b3
EC
7547 tcg_temp_free_i32(t1);
7548 tcg_temp_free(taddr);
7549 tcg_gen_mov_i32(cpu_R[rd], t0);
7550 tcg_temp_free_i32(t0);
426f5abc 7551 tcg_gen_br(done_label);
354161b3 7552
426f5abc
PB
7553 gen_set_label(fail_label);
7554 tcg_gen_movi_i32(cpu_R[rd], 1);
7555 gen_set_label(done_label);
03d05e2d 7556 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 7557}
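
/*
 * Illustrative guest-side use of the primitives above (not part of the
 * translator): a typical A32 atomic increment is a ldrex/strex retry loop,
 *
 *   1:  ldrex   r1, [r0]
 *       add     r1, r1, #1
 *       strex   r2, r1, [r0]
 *       cmp     r2, #0
 *       bne     1b
 *
 * gen_store_exclusive() provides the "strex failed" path simply by writing
 * 1 to Rd and clearing the monitor, so the guest loop retries.
 */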

/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    s->base.is_jmp = DISAS_UPDATE;
}

/* Generate a label used for skipping this instruction */
static void arm_gen_condlabel(DisasContext *s)
{
    if (!s->condjmp) {
        s->condlabel = gen_new_label();
        s->condjmp = 1;
    }
}

/* Skip this instruction if the ARM condition is false */
static void arm_skip_unless(DisasContext *s, uint32_t cond)
{
    arm_gen_condlabel(s);
    arm_gen_test_cc(cond ^ 1, s->condlabel);
}

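/*
 * Top-level A32 decoder.  The decode order below matters: M-profile cores
 * reject ARM mode outright, the cond == 0xf space is handled as the v5+
 * unconditional-instruction block, and only then do we fall into the
 * conditionally executed decode groups.
 */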
static void disas_arm_insn(DisasContext *s, unsigned int insn)
{
    unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 tmp3;
    TCGv_i32 addr;
    TCGv_i64 tmp64;

    /* M variants do not implement ARM mode; this must raise the INVSTATE
     * UsageFault exception.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }
    cond = insn >> 28;
    if (cond == 0xf){
        /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
         * choose to UNDEF. In ARMv5 and above the space is used
         * for miscellaneous unconditional instructions.
         */
        ARCH(5);

        /* Unconditional instructions. */
        if (((insn >> 25) & 7) == 1) {
            /* NEON Data processing. */
            if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
                goto illegal_op;
            }

            if (disas_neon_data_insn(s, insn)) {
                goto illegal_op;
            }
            return;
        }
        if ((insn & 0x0f100000) == 0x04000000) {
            /* NEON load/store. */
            if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
                goto illegal_op;
            }

            if (disas_neon_ls_insn(s, insn)) {
                goto illegal_op;
            }
            return;
        }
        if ((insn & 0x0f000e10) == 0x0e000a00) {
            /* VFP. */
            if (disas_vfp_insn(s, insn)) {
                goto illegal_op;
            }
            return;
        }
        if (((insn & 0x0f30f000) == 0x0510f000) ||
            ((insn & 0x0f30f010) == 0x0710f000)) {
            if ((insn & (1 << 22)) == 0) {
                /* PLDW; v7MP */
                if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
                    goto illegal_op;
                }
            }
            /* Otherwise PLD; v5TE+ */
            ARCH(5TE);
            return;
        }
        if (((insn & 0x0f70f000) == 0x0450f000) ||
            ((insn & 0x0f70f010) == 0x0650f000)) {
            ARCH(7);
            return; /* PLI; V7 */
        }
        if (((insn & 0x0f700000) == 0x04100000) ||
            ((insn & 0x0f700010) == 0x06100000)) {
            if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
                goto illegal_op;
            }
            return; /* v7MP: Unallocated memory hint: must NOP */
        }

        if ((insn & 0x0ffffdff) == 0x01010000) {
            ARCH(6);
            /* setend */
            if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
                gen_helper_setend(cpu_env);
                s->base.is_jmp = DISAS_UPDATE;
            }
            return;
        } else if ((insn & 0x0fffff00) == 0x057ff000) {
            switch ((insn >> 4) & 0xf) {
            case 1: /* clrex */
                ARCH(6K);
                gen_clrex(s);
                return;
            case 4: /* dsb */
            case 5: /* dmb */
                ARCH(7);
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
                return;
            case 6: /* isb */
                /* We need to break the TB after this insn to execute
                 * self-modifying code correctly and also to take
                 * any pending interrupts immediately.
                 */
                gen_goto_tb(s, 0, s->pc & ~1);
                return;
            case 7: /* sb */
                if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
                    goto illegal_op;
                }
                /*
                 * TODO: There is no speculation barrier opcode
                 * for TCG; MB and end the TB instead.
                 */
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
                gen_goto_tb(s, 0, s->pc & ~1);
                return;
            default:
                goto illegal_op;
            }
        } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
            /* srs */
            ARCH(6);
            gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
            return;
        } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
            /* rfe */
            int32_t offset;
            if (IS_USER(s))
                goto illegal_op;
            ARCH(6);
            rn = (insn >> 16) & 0xf;
            addr = load_reg(s, rn);
            i = (insn >> 23) & 3;
            switch (i) {
            case 0: offset = -4; break; /* DA */
            case 1: offset = 0; break; /* IA */
            case 2: offset = -8; break; /* DB */
            case 3: offset = 4; break; /* IB */
            default: abort();
            }
            if (offset)
                tcg_gen_addi_i32(addr, addr, offset);
            /* Load PC into tmp and CPSR into tmp2. */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            tcg_gen_addi_i32(addr, addr, 4);
            tmp2 = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
            if (insn & (1 << 21)) {
                /* Base writeback. */
                switch (i) {
                case 0: offset = -8; break;
                case 1: offset = 4; break;
                case 2: offset = -4; break;
                case 3: offset = 0; break;
                default: abort();
                }
                if (offset)
                    tcg_gen_addi_i32(addr, addr, offset);
                store_reg(s, rn, addr);
            } else {
                tcg_temp_free_i32(addr);
            }
            gen_rfe(s, tmp, tmp2);
            return;
        } else if ((insn & 0x0e000000) == 0x0a000000) {
            /* branch link and change to thumb (blx <offset>) */
            int32_t offset;

            val = (uint32_t)s->pc;
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, val);
            store_reg(s, 14, tmp);
            /* Sign-extend the 24-bit offset */
            offset = (((int32_t)insn) << 8) >> 8;
            /* offset * 4 + bit24 * 2 + (thumb bit) */
            val += (offset << 2) | ((insn >> 23) & 2) | 1;
            /* pipeline offset */
            val += 4;
            /* protected by ARCH(5); above, near the start of uncond block */
            gen_bx_im(s, val);
            return;
        } else if ((insn & 0x0e000f00) == 0x0c000100) {
            if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
                /* iWMMXt register transfer. */
                if (extract32(s->c15_cpar, 1, 1)) {
                    if (!disas_iwmmxt_insn(s, insn)) {
                        return;
                    }
                }
            }
        } else if ((insn & 0x0e000a00) == 0x0c000800
                   && arm_dc_feature(s, ARM_FEATURE_V8)) {
            if (disas_neon_insn_3same_ext(s, insn)) {
                goto illegal_op;
            }
            return;
        } else if ((insn & 0x0f000a00) == 0x0e000800
                   && arm_dc_feature(s, ARM_FEATURE_V8)) {
            if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
                goto illegal_op;
            }
            return;
        } else if ((insn & 0x0fe00000) == 0x0c400000) {
            /* Coprocessor double register transfer. */
            ARCH(5TE);
        } else if ((insn & 0x0f000010) == 0x0e000010) {
            /* Additional coprocessor register transfer. */
        } else if ((insn & 0x0ff10020) == 0x01000000) {
            uint32_t mask;
            uint32_t val;
            /* cps (privileged) */
            if (IS_USER(s))
                return;
            mask = val = 0;
            if (insn & (1 << 19)) {
                if (insn & (1 << 8))
                    mask |= CPSR_A;
                if (insn & (1 << 7))
                    mask |= CPSR_I;
                if (insn & (1 << 6))
                    mask |= CPSR_F;
                if (insn & (1 << 18))
                    val |= mask;
            }
            if (insn & (1 << 17)) {
                mask |= CPSR_M;
                val |= (insn & 0x1f);
            }
            if (mask) {
                gen_set_psr_im(s, mask, 0, val);
            }
            return;
        }
        goto illegal_op;
    }
    if (cond != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        arm_skip_unless(s, cond);
    }
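    /*
     * From here on the instruction is conditionally executed: rather than
     * evaluating the condition inline, arm_skip_unless() above emitted a
     * branch over the remainder of the generated code for the case where
     * the condition fails.
     */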
    if ((insn & 0x0f900000) == 0x03000000) {
        if ((insn & (1 << 21)) == 0) {
            ARCH(6T2);
            rd = (insn >> 12) & 0xf;
            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
            if ((insn & (1 << 22)) == 0) {
                /* MOVW */
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, val);
            } else {
                /* MOVT */
                tmp = load_reg(s, rd);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_ori_i32(tmp, tmp, val << 16);
            }
            store_reg(s, rd, tmp);
        } else {
            if (((insn >> 12) & 0xf) != 0xf)
                goto illegal_op;
            if (((insn >> 16) & 0xf) == 0) {
                gen_nop_hint(s, insn & 0xff);
            } else {
                /* CPSR = immediate */
                val = insn & 0xff;
                shift = ((insn >> 8) & 0xf) * 2;
                if (shift)
                    val = (val >> shift) | (val << (32 - shift));
                i = ((insn & (1 << 22)) != 0);
                if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
                                   i, val)) {
                    goto illegal_op;
                }
            }
        }
    } else if ((insn & 0x0f900000) == 0x01000000
               && (insn & 0x00000090) != 0x00000090) {
        /* miscellaneous instructions */
        op1 = (insn >> 21) & 3;
        sh = (insn >> 4) & 0xf;
        rm = insn & 0xf;
        switch (sh) {
        case 0x0: /* MSR, MRS */
            if (insn & (1 << 9)) {
                /* MSR (banked) and MRS (banked) */
                int sysm = extract32(insn, 16, 4) |
                    (extract32(insn, 8, 1) << 4);
                int r = extract32(insn, 22, 1);

                if (op1 & 1) {
                    /* MSR (banked) */
                    gen_msr_banked(s, r, sysm, rm);
                } else {
                    /* MRS (banked) */
                    int rd = extract32(insn, 12, 4);

                    gen_mrs_banked(s, r, sysm, rd);
                }
                break;
            }

            /* MSR, MRS (for PSRs) */
            if (op1 & 1) {
                /* PSR = reg */
                tmp = load_reg(s, rm);
                i = ((op1 & 2) != 0);
                if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
                    goto illegal_op;
            } else {
                /* reg = PSR */
                rd = (insn >> 12) & 0xf;
                if (op1 & 2) {
                    if (IS_USER(s))
                        goto illegal_op;
                    tmp = load_cpu_field(spsr);
                } else {
                    tmp = tcg_temp_new_i32();
                    gen_helper_cpsr_read(tmp, cpu_env);
                }
                store_reg(s, rd, tmp);
            }
            break;
        case 0x1:
            if (op1 == 1) {
                /* branch/exchange thumb (bx). */
                ARCH(4T);
                tmp = load_reg(s, rm);
                gen_bx(s, tmp);
            } else if (op1 == 3) {
                /* clz */
                ARCH(5);
                rd = (insn >> 12) & 0xf;
                tmp = load_reg(s, rm);
                tcg_gen_clzi_i32(tmp, tmp, 32);
                store_reg(s, rd, tmp);
            } else {
                goto illegal_op;
            }
            break;
        case 0x2:
            if (op1 == 1) {
                ARCH(5J); /* bxj */
                /* Trivial implementation equivalent to bx. */
                tmp = load_reg(s, rm);
                gen_bx(s, tmp);
            } else {
                goto illegal_op;
            }
            break;
        case 0x3:
            if (op1 != 1)
                goto illegal_op;

            ARCH(5);
            /* branch link/exchange thumb (blx) */
            tmp = load_reg(s, rm);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        case 0x4:
        {
            /* crc32/crc32c */
            uint32_t c = extract32(insn, 8, 4);

            /* Check this CPU supports ARMv8 CRC instructions.
             * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
             * Bits 8, 10 and 11 should be zero.
             */
            if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
                goto illegal_op;
            }

            rn = extract32(insn, 16, 4);
            rd = extract32(insn, 12, 4);

            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if (op1 == 0) {
                tcg_gen_andi_i32(tmp2, tmp2, 0xff);
            } else if (op1 == 1) {
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
            }
            tmp3 = tcg_const_i32(1 << op1);
            if (c & 0x2) {
                gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
            } else {
                gen_helper_crc32(tmp, tmp, tmp2, tmp3);
            }
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp3);
            store_reg(s, rd, tmp);
            break;
        }
        case 0x5: /* saturating add/subtract */
            ARCH(5TE);
            rd = (insn >> 12) & 0xf;
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rn);
            if (op1 & 2)
                gen_helper_double_saturate(tmp2, cpu_env, tmp2);
            if (op1 & 1)
                gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
            else
                gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 0x6: /* ERET */
            if (op1 != 3) {
                goto illegal_op;
            }
            if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
                goto illegal_op;
            }
            if ((insn & 0x000fff0f) != 0x0000000e) {
                /* UNPREDICTABLE; we choose to UNDEF */
                goto illegal_op;
            }

            if (s->current_el == 2) {
                tmp = load_cpu_field(elr_el[2]);
            } else {
                tmp = load_reg(s, 14);
            }
            gen_exception_return(s, tmp);
            break;
        case 7:
        {
            int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
            switch (op1) {
            case 0:
                /* HLT */
                gen_hlt(s, imm16);
                break;
            case 1:
                /* bkpt */
                ARCH(5);
                gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
                break;
            case 2:
                /* Hypervisor call (v7) */
                ARCH(7);
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_hvc(s, imm16);
                break;
            case 3:
                /* Secure monitor call (v6+) */
                ARCH(6K);
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_smc(s);
                break;
            default:
                g_assert_not_reached();
            }
            break;
        }
        case 0x8: /* signed multiply */
        case 0xa:
        case 0xc:
        case 0xe:
            ARCH(5TE);
            rs = (insn >> 8) & 0xf;
            rn = (insn >> 12) & 0xf;
            rd = (insn >> 16) & 0xf;
            if (op1 == 1) {
                /* (32 * 16) >> 16 */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                if (sh & 4)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if ((sh & 2) == 0) {
                    tmp2 = load_reg(s, rn);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                store_reg(s, rd, tmp);
            } else {
                /* 16 * 16 */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
                tcg_temp_free_i32(tmp2);
                if (op1 == 2) {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ext_i32_i64(tmp64, tmp);
                    tcg_temp_free_i32(tmp);
                    gen_addq(s, tmp64, rn, rd);
                    gen_storeq_reg(s, rn, rd, tmp64);
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (op1 == 0) {
                        tmp2 = load_reg(s, rn);
                        gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                    }
                    store_reg(s, rd, tmp);
                }
            }
            break;
        default:
            goto illegal_op;
        }
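        /*
         * The data-processing block that follows decodes the classic A32
         * "modified immediate": an 8-bit value rotated right by twice the
         * 4-bit rotate field.  For example imm8 = 0xff with rotate = 1
         * yields 0xff ror 2 == 0xc000003f.
         */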
    } else if (((insn & 0x0e000000) == 0 &&
                (insn & 0x00000090) != 0x90) ||
               ((insn & 0x0e000000) == (1 << 25))) {
        int set_cc, logic_cc, shiftop;

        op1 = (insn >> 21) & 0xf;
        set_cc = (insn >> 20) & 1;
        logic_cc = table_logic_cc[op1] & set_cc;

        /* data processing instruction */
        if (insn & (1 << 25)) {
            /* immediate operand */
            val = insn & 0xff;
            shift = ((insn >> 8) & 0xf) * 2;
            if (shift) {
                val = (val >> shift) | (val << (32 - shift));
            }
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, val);
            if (logic_cc && shift) {
                gen_set_CF_bit31(tmp2);
            }
        } else {
            /* register */
            rm = (insn) & 0xf;
            tmp2 = load_reg(s, rm);
            shiftop = (insn >> 5) & 3;
            if (!(insn & (1 << 4))) {
                shift = (insn >> 7) & 0x1f;
                gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            } else {
                rs = (insn >> 8) & 0xf;
                tmp = load_reg(s, rs);
                gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
            }
        }
        if (op1 != 0x0f && op1 != 0x0d) {
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rn);
        } else {
            tmp = NULL;
        }
        rd = (insn >> 12) & 0xf;
        switch(op1) {
        case 0x00:
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x01:
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x02:
            if (set_cc && rd == 15) {
                /* SUBS r15, ... is used for exception return. */
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_sub_CC(tmp, tmp, tmp2);
                gen_exception_return(s, tmp);
            } else {
                if (set_cc) {
                    gen_sub_CC(tmp, tmp, tmp2);
                } else {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                }
                store_reg_bx(s, rd, tmp);
            }
            break;
        case 0x03:
            if (set_cc) {
                gen_sub_CC(tmp, tmp2, tmp);
            } else {
                tcg_gen_sub_i32(tmp, tmp2, tmp);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x04:
            if (set_cc) {
                gen_add_CC(tmp, tmp, tmp2);
            } else {
                tcg_gen_add_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x05:
            if (set_cc) {
                gen_adc_CC(tmp, tmp, tmp2);
            } else {
                gen_add_carry(tmp, tmp, tmp2);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x06:
            if (set_cc) {
                gen_sbc_CC(tmp, tmp, tmp2);
            } else {
                gen_sub_carry(tmp, tmp, tmp2);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x07:
            if (set_cc) {
                gen_sbc_CC(tmp, tmp2, tmp);
            } else {
                gen_sub_carry(tmp, tmp2, tmp);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x08:
            if (set_cc) {
                tcg_gen_and_i32(tmp, tmp, tmp2);
                gen_logic_CC(tmp);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x09:
            if (set_cc) {
                tcg_gen_xor_i32(tmp, tmp, tmp2);
                gen_logic_CC(tmp);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x0a:
            if (set_cc) {
                gen_sub_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x0b:
            if (set_cc) {
                gen_add_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp);
            break;
        case 0x0c:
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(s, rd, tmp);
            break;
        case 0x0d:
            if (logic_cc && rd == 15) {
                /* MOVS r15, ... is used for exception return. */
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_exception_return(s, tmp2);
            } else {
                if (logic_cc) {
                    gen_logic_CC(tmp2);
                }
                store_reg_bx(s, rd, tmp2);
            }
            break;
        case 0x0e:
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(s, rd, tmp);
            break;
        default:
        case 0x0f:
            tcg_gen_not_i32(tmp2, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(s, rd, tmp2);
            break;
        }
        if (op1 != 0x0f && op1 != 0x0d) {
            tcg_temp_free_i32(tmp2);
        }
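        /*
         * Note the consistent use of store_reg_bx() for ALU results in the
         * block above: a write to r15 then behaves like an interworking
         * branch where the architecture version calls for it.
         */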
    } else {
        /* other instructions */
        op1 = (insn >> 24) & 0xf;
        switch(op1) {
        case 0x0:
        case 0x1:
            /* multiplies, extra load/stores */
            sh = (insn >> 5) & 3;
            if (sh == 0) {
                if (op1 == 0x0) {
                    rd = (insn >> 16) & 0xf;
                    rn = (insn >> 12) & 0xf;
                    rs = (insn >> 8) & 0xf;
                    rm = (insn) & 0xf;
                    op1 = (insn >> 20) & 0xf;
                    switch (op1) {
                    case 0: case 1: case 2: case 3: case 6:
                        /* 32 bit mul */
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        tcg_gen_mul_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        if (insn & (1 << 22)) {
                            /* Subtract (mls) */
                            ARCH(6T2);
                            tmp2 = load_reg(s, rn);
                            tcg_gen_sub_i32(tmp, tmp2, tmp);
                            tcg_temp_free_i32(tmp2);
                        } else if (insn & (1 << 21)) {
                            /* Add */
                            tmp2 = load_reg(s, rn);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        if (insn & (1 << 20))
                            gen_logic_CC(tmp);
                        store_reg(s, rd, tmp);
                        break;
                    case 4:
                        /* 64 bit mul double accumulate (UMAAL) */
                        ARCH(6);
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                        gen_addq_lo(s, tmp64, rn);
                        gen_addq_lo(s, tmp64, rd);
                        gen_storeq_reg(s, rn, rd, tmp64);
                        tcg_temp_free_i64(tmp64);
                        break;
                    case 8: case 9: case 10: case 11:
                    case 12: case 13: case 14: case 15:
                        /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
                        } else {
                            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
                        }
                        if (insn & (1 << 21)) { /* mult accumulate */
                            TCGv_i32 al = load_reg(s, rn);
                            TCGv_i32 ah = load_reg(s, rd);
                            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
                            tcg_temp_free_i32(al);
                            tcg_temp_free_i32(ah);
                        }
                        if (insn & (1 << 20)) {
                            gen_logicq_cc(tmp, tmp2);
                        }
                        store_reg(s, rn, tmp);
                        store_reg(s, rd, tmp2);
                        break;
                    default:
                        goto illegal_op;
                    }
                } else {
                    rn = (insn >> 16) & 0xf;
                    rd = (insn >> 12) & 0xf;
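                    /*
                     * Bit 23 splits this space: set means the v6K/v8
                     * synchronisation primitives (ldrex/strex and the v8
                     * load-acquire/store-release forms), clear means the
                     * legacy SWP/SWPB.
                     */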
                    if (insn & (1 << 23)) {
                        /* load/store exclusive */
                        bool is_ld = extract32(insn, 20, 1);
                        bool is_lasr = !extract32(insn, 8, 1);
                        int op2 = (insn >> 8) & 3;
                        op1 = (insn >> 21) & 0x3;

                        switch (op2) {
                        case 0: /* lda/stl */
                            if (op1 == 1) {
                                goto illegal_op;
                            }
                            ARCH(8);
                            break;
                        case 1: /* reserved */
                            goto illegal_op;
                        case 2: /* ldaex/stlex */
                            ARCH(8);
                            break;
                        case 3: /* ldrex/strex */
                            if (op1) {
                                ARCH(6K);
                            } else {
                                ARCH(6);
                            }
                            break;
                        }

                        addr = tcg_temp_local_new_i32();
                        load_reg_var(s, addr, rn);

                        if (is_lasr && !is_ld) {
                            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
                        }

                        if (op2 == 0) {
                            if (is_ld) {
                                tmp = tcg_temp_new_i32();
                                switch (op1) {
                                case 0: /* lda */
                                    gen_aa32_ld32u_iss(s, tmp, addr,
                                                       get_mem_index(s),
                                                       rd | ISSIsAcqRel);
                                    break;
                                case 2: /* ldab */
                                    gen_aa32_ld8u_iss(s, tmp, addr,
                                                      get_mem_index(s),
                                                      rd | ISSIsAcqRel);
                                    break;
                                case 3: /* ldah */
                                    gen_aa32_ld16u_iss(s, tmp, addr,
                                                       get_mem_index(s),
                                                       rd | ISSIsAcqRel);
                                    break;
                                default:
                                    abort();
                                }
                                store_reg(s, rd, tmp);
                            } else {
                                rm = insn & 0xf;
                                tmp = load_reg(s, rm);
                                switch (op1) {
                                case 0: /* stl */
                                    gen_aa32_st32_iss(s, tmp, addr,
                                                      get_mem_index(s),
                                                      rm | ISSIsAcqRel);
                                    break;
                                case 2: /* stlb */
                                    gen_aa32_st8_iss(s, tmp, addr,
                                                     get_mem_index(s),
                                                     rm | ISSIsAcqRel);
                                    break;
                                case 3: /* stlh */
                                    gen_aa32_st16_iss(s, tmp, addr,
                                                      get_mem_index(s),
                                                      rm | ISSIsAcqRel);
                                    break;
                                default:
                                    abort();
                                }
                                tcg_temp_free_i32(tmp);
                            }
                        } else if (is_ld) {
                            switch (op1) {
                            case 0: /* ldrex */
                                gen_load_exclusive(s, rd, 15, addr, 2);
                                break;
                            case 1: /* ldrexd */
                                gen_load_exclusive(s, rd, rd + 1, addr, 3);
                                break;
                            case 2: /* ldrexb */
                                gen_load_exclusive(s, rd, 15, addr, 0);
                                break;
                            case 3: /* ldrexh */
                                gen_load_exclusive(s, rd, 15, addr, 1);
                                break;
                            default:
                                abort();
                            }
                        } else {
                            rm = insn & 0xf;
                            switch (op1) {
                            case 0: /* strex */
                                gen_store_exclusive(s, rd, rm, 15, addr, 2);
                                break;
                            case 1: /* strexd */
                                gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
                                break;
                            case 2: /* strexb */
                                gen_store_exclusive(s, rd, rm, 15, addr, 0);
                                break;
                            case 3: /* strexh */
                                gen_store_exclusive(s, rd, rm, 15, addr, 1);
                                break;
                            default:
                                abort();
                            }
                        }
                        tcg_temp_free_i32(addr);

                        if (is_lasr && is_ld) {
                            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
                        }
                    } else if ((insn & 0x00300f00) == 0) {
                        /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
                         *  - SWP, SWPB
                         */

                        TCGv taddr;
                        TCGMemOp opc = s->be_data;

                        rm = (insn) & 0xf;

                        if (insn & (1 << 22)) {
                            opc |= MO_UB;
                        } else {
                            opc |= MO_UL | MO_ALIGN;
                        }

                        addr = load_reg(s, rn);
                        taddr = gen_aa32_addr(s, addr, opc);
                        tcg_temp_free_i32(addr);

                        tmp = load_reg(s, rm);
                        tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
                                                get_mem_index(s), opc);
                        tcg_temp_free(taddr);
                        store_reg(s, rd, tmp);
                    } else {
                        goto illegal_op;
                    }
                }
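                /*
                 * Note that SWP/SWPB above is implemented as a genuine
                 * atomic exchange (tcg_gen_atomic_xchg_i32) rather than a
                 * separate load and store, so it remains atomic with
                 * respect to other vCPUs under multi-threaded TCG.
                 */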
            } else {
                int address_offset;
                bool load = insn & (1 << 20);
                bool wbit = insn & (1 << 21);
                bool pbit = insn & (1 << 24);
                bool doubleword = false;
                ISSInfo issinfo;

                /* Misc load/store */
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;

                /* ISS not valid if writeback */
                issinfo = (pbit & !wbit) ? rd : ISSInvalid;

                if (!load && (sh & 2)) {
                    /* doubleword */
                    ARCH(5TE);
                    if (rd & 1) {
                        /* UNPREDICTABLE; we choose to UNDEF */
                        goto illegal_op;
                    }
                    load = (sh & 1) == 0;
                    doubleword = true;
                }

                addr = load_reg(s, rn);
                if (pbit) {
                    gen_add_datah_offset(s, insn, 0, addr);
                }
                address_offset = 0;

                if (doubleword) {
                    if (!load) {
                        /* store */
                        tmp = load_reg(s, rd);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = load_reg(s, rd + 1);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* load */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, rd, tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        rd++;
                    }
                    address_offset = -4;
                } else if (load) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    switch (sh) {
                    case 1:
                        gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
                                           issinfo);
                        break;
                    case 2:
                        gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
                                          issinfo);
                        break;
                    default:
                    case 3:
                        gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
                                           issinfo);
                        break;
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, rd);
                    gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
                    tcg_temp_free_i32(tmp);
                }
                /* Perform base writeback before the loaded value to
                   ensure correct behavior with overlapping index registers.
                   ldrd with base writeback is undefined if the
                   destination and index registers overlap. */
                if (!pbit) {
                    gen_add_datah_offset(s, insn, address_offset, addr);
                    store_reg(s, rn, addr);
                } else if (wbit) {
                    if (address_offset)
                        tcg_gen_addi_i32(addr, addr, address_offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                if (load) {
                    /* Complete the load. */
                    store_reg(s, rd, tmp);
                }
            }
            break;
        case 0x4:
        case 0x5:
            goto do_ldst;
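        /*
         * Cases 0x4/0x5 above are ordinary load/store word/byte and share
         * the do_ldst path below; 0x6/0x7 decode as media instructions
         * only when bit 4 is set, otherwise they too fall through to
         * do_ldst.
         */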
        case 0x6:
        case 0x7:
            if (insn & (1 << 4)) {
                ARCH(6);
                /* Armv6 Media instructions. */
                rm = insn & 0xf;
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                rs = (insn >> 8) & 0xf;
                switch ((insn >> 23) & 3) {
                case 0: /* Parallel add/subtract. */
                    op1 = (insn >> 20) & 7;
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    sh = (insn >> 5) & 7;
                    if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                        goto illegal_op;
                    gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                    break;
                case 1:
                    if ((insn & 0x00700020) == 0) {
                        /* Halfword pack. */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            /* pkhtb */
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp2, tmp2, shift);
                            tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                            tcg_gen_ext16u_i32(tmp2, tmp2);
                        } else {
                            /* pkhbt */
                            if (shift)
                                tcg_gen_shli_i32(tmp2, tmp2, shift);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        }
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00200020) == 0x00200000) {
                        /* [us]sat */
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp, tmp, shift);
                        } else {
                            tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                        else
                            gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00300fe0) == 0x00200f20) {
                        /* [us]sat16 */
                        tmp = load_reg(s, rm);
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                        else
                            gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                        /* Select bytes. */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        tmp3 = tcg_temp_new_i32();
                        tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                        gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                        tcg_temp_free_i32(tmp3);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x000003e0) == 0x00000060) {
                        tmp = load_reg(s, rm);
                        shift = (insn >> 10) & 3;
                        /* ??? In many cases it's not necessary to do a
                           rotate, a shift is sufficient. */
                        if (shift != 0)
                            tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                        op1 = (insn >> 20) & 7;
                        switch (op1) {
                        case 0: gen_sxtb16(tmp); break;
                        case 2: gen_sxtb(tmp); break;
                        case 3: gen_sxth(tmp); break;
                        case 4: gen_uxtb16(tmp); break;
                        case 6: gen_uxtb(tmp); break;
                        case 7: gen_uxth(tmp); break;
                        default: goto illegal_op;
                        }
                        if (rn != 15) {
                            tmp2 = load_reg(s, rn);
                            if ((op1 & 3) == 0) {
                                gen_add16(tmp, tmp2);
                            } else {
                                tcg_gen_add_i32(tmp, tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            }
                        }
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                        /* rev */
                        tmp = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            if (insn & (1 << 7)) {
                                gen_revsh(tmp);
                            } else {
                                ARCH(6T2);
                                gen_helper_rbit(tmp, tmp);
                            }
                        } else {
                            if (insn & (1 << 7))
                                gen_rev16(tmp);
                            else
                                tcg_gen_bswap32_i32(tmp, tmp);
                        }
                        store_reg(s, rd, tmp);
                    } else {
                        goto illegal_op;
                    }
                    break;
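                /*
                 * In the SMMUL/SMMLA/SMMLS group below, bit 5 selects the
                 * rounding form: adding 0x80000000 before taking the high
                 * 32 bits of the 64-bit product rounds to nearest instead
                 * of truncating towards zero.
                 */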
8862 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8863 switch ((insn >> 20) & 0x7) {
8864 case 5:
8865 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8866 /* op2 not 00x or 11x : UNDEF */
8867 goto illegal_op;
8868 }
838fa72d
AJ
8869 /* Signed multiply most significant [accumulate].
8870 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8871 tmp = load_reg(s, rm);
8872 tmp2 = load_reg(s, rs);
a7812ae4 8873 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8874
955a7dd5 8875 if (rd != 15) {
838fa72d 8876 tmp = load_reg(s, rd);
9ee6e8bb 8877 if (insn & (1 << 6)) {
838fa72d 8878 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8879 } else {
838fa72d 8880 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
8881 }
8882 }
838fa72d
AJ
8883 if (insn & (1 << 5)) {
8884 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8885 }
8886 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8887 tmp = tcg_temp_new_i32();
ecc7b3aa 8888 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 8889 tcg_temp_free_i64(tmp64);
955a7dd5 8890 store_reg(s, rn, tmp);
41e9564d
PM
8891 break;
8892 case 0:
8893 case 4:
8894 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8895 if (insn & (1 << 7)) {
8896 goto illegal_op;
8897 }
8898 tmp = load_reg(s, rm);
8899 tmp2 = load_reg(s, rs);
9ee6e8bb 8900 if (insn & (1 << 5))
5e3f878a
PB
8901 gen_swap_half(tmp2);
8902 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8903 if (insn & (1 << 22)) {
5e3f878a 8904 /* smlald, smlsld */
33bbd75a
PC
8905 TCGv_i64 tmp64_2;
8906
a7812ae4 8907 tmp64 = tcg_temp_new_i64();
33bbd75a 8908 tmp64_2 = tcg_temp_new_i64();
a7812ae4 8909 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 8910 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 8911 tcg_temp_free_i32(tmp);
33bbd75a
PC
8912 tcg_temp_free_i32(tmp2);
8913 if (insn & (1 << 6)) {
8914 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8915 } else {
8916 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8917 }
8918 tcg_temp_free_i64(tmp64_2);
8919 gen_addq(s, tmp64, rd, rn);
8920 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8921 tcg_temp_free_i64(tmp64);
9ee6e8bb 8922 } else {
5e3f878a 8923 /* smuad, smusd, smlad, smlsd */
8924 if (insn & (1 << 6)) {
8925 /* This subtraction cannot overflow. */
8926 tcg_gen_sub_i32(tmp, tmp, tmp2);
8927 } else {
8928 /* This addition cannot overflow 32 bits;
8929 * however it may overflow considered as a
8930 * signed operation, in which case we must set
8931 * the Q flag.
8932 */
8933 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8934 }
8935 tcg_temp_free_i32(tmp2);
22478e79 8936 if (rd != 15)
9ee6e8bb 8937 {
22478e79 8938 tmp2 = load_reg(s, rd);
9ef39277 8939 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8940 tcg_temp_free_i32(tmp2);
9ee6e8bb 8941 }
22478e79 8942 store_reg(s, rn, tmp);
9ee6e8bb 8943 }
41e9564d 8944 break;
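/* For illustration: smuad computes (Rn.lo * Rm.lo) + (Rn.hi * Rm.hi).
 * With both operands 0x80008000, each product is 0x40000000 and the
 * sum 0x80000000 overflows as a signed addition, which is exactly
 * the case where the helper above must set the Q flag.
 */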
8945 case 1:
8946 case 3:
8947 /* SDIV, UDIV */
7e0cf8b4 8948 if (!dc_isar_feature(arm_div, s)) {
8949 goto illegal_op;
8950 }
8951 if (((insn >> 5) & 7) || (rd != 15)) {
8952 goto illegal_op;
8953 }
8954 tmp = load_reg(s, rm);
8955 tmp2 = load_reg(s, rs);
8956 if (insn & (1 << 21)) {
8957 gen_helper_udiv(tmp, tmp, tmp2);
8958 } else {
8959 gen_helper_sdiv(tmp, tmp, tmp2);
8960 }
8961 tcg_temp_free_i32(tmp2);
8962 store_reg(s, rn, tmp);
8963 break;
8964 default:
8965 goto illegal_op;
8966 }
8967 break;
8968 case 3:
8969 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8970 switch (op1) {
8971 case 0: /* Unsigned sum of absolute differences. */
8972 ARCH(6);
8973 tmp = load_reg(s, rm);
8974 tmp2 = load_reg(s, rs);
8975 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8976 tcg_temp_free_i32(tmp2);
8977 if (rd != 15) {
8978 tmp2 = load_reg(s, rd);
6ddbc6e4 8979 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8980 tcg_temp_free_i32(tmp2);
9ee6e8bb 8981 }
ded9d295 8982 store_reg(s, rn, tmp);
8983 break;
8984 case 0x20: case 0x24: case 0x28: case 0x2c:
8985 /* Bitfield insert/clear. */
8986 ARCH(6T2);
8987 shift = (insn >> 7) & 0x1f;
8988 i = (insn >> 16) & 0x1f;
8989 if (i < shift) {
8990 /* UNPREDICTABLE; we choose to UNDEF */
8991 goto illegal_op;
8992 }
8993 i = i + 1 - shift;
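/* e.g. BFI r0, r1, #8, #4 encodes lsb (shift) = 8 and msb (i) = 11,
 * so i becomes the field width, 4. */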
8994 if (rm == 15) {
7d1b0095 8995 tmp = tcg_temp_new_i32();
5e3f878a 8996 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8997 } else {
5e3f878a 8998 tmp = load_reg(s, rm);
8999 }
9000 if (i != 32) {
5e3f878a 9001 tmp2 = load_reg(s, rd);
d593c48e 9002 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9003 tcg_temp_free_i32(tmp2);
9ee6e8bb 9004 }
5e3f878a 9005 store_reg(s, rd, tmp);
9006 break;
9007 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9008 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9009 ARCH(6T2);
5e3f878a 9010 tmp = load_reg(s, rm);
9011 shift = (insn >> 7) & 0x1f;
9012 i = ((insn >> 16) & 0x1f) + 1;
9013 if (shift + i > 32)
9014 goto illegal_op;
9015 if (i < 32) {
9016 if (op1 & 0x20) {
59a71b4c 9017 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9018 } else {
59a71b4c 9019 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9020 }
9021 }
5e3f878a 9022 store_reg(s, rd, tmp);
9023 break;
9024 default:
9025 goto illegal_op;
9026 }
9027 break;
9028 }
9029 break;
9030 }
9031 do_ldst:
9032 /* Check for undefined extension instructions
9033 * per the ARM Bible, i.e.:
9034 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9035 */
9036 sh = (0xf << 20) | (0xf << 4);
9037 if (op1 == 0x7 && ((insn & sh) == sh))
9038 {
9039 goto illegal_op;
9040 }
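/* For illustration: this matches the permanently-undefined space
 * that was later allocated to UDF, e.g. 0xe7f000f0 is UDF #0.
 */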
9041 /* load/store byte/word */
9042 rn = (insn >> 16) & 0xf;
9043 rd = (insn >> 12) & 0xf;
b0109805 9044 tmp2 = load_reg(s, rn);
9045 if ((insn & 0x01200000) == 0x00200000) {
9046 /* ldrt/strt */
579d21cc 9047 i = get_a32_user_mem_index(s);
9048 } else {
9049 i = get_mem_index(s);
9050 }
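/* ldrt/strt use the unprivileged memory index, so the access is
 * permission-checked as if made from user mode even when the CPU
 * is currently privileged.
 */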
9ee6e8bb 9051 if (insn & (1 << 24))
b0109805 9052 gen_add_data_offset(s, insn, tmp2);
9053 if (insn & (1 << 20)) {
9054 /* load */
5a839c0d 9055 tmp = tcg_temp_new_i32();
9ee6e8bb 9056 if (insn & (1 << 22)) {
9bb6558a 9057 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9058 } else {
9bb6558a 9059 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9060 }
9061 } else {
9062 /* store */
b0109805 9063 tmp = load_reg(s, rd);
5a839c0d 9064 if (insn & (1 << 22)) {
9bb6558a 9065 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9066 } else {
9bb6558a 9067 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
9068 }
9069 tcg_temp_free_i32(tmp);
9070 }
9071 if (!(insn & (1 << 24))) {
9072 gen_add_data_offset(s, insn, tmp2);
9073 store_reg(s, rn, tmp2);
9074 } else if (insn & (1 << 21)) {
9075 store_reg(s, rn, tmp2);
9076 } else {
7d1b0095 9077 tcg_temp_free_i32(tmp2);
9078 }
9079 if (insn & (1 << 20)) {
9080 /* Complete the load. */
7dcc1f89 9081 store_reg_from_load(s, rd, tmp);
9082 }
9083 break;
9084 case 0x08:
9085 case 0x09:
9086 {
9087 int j, n, loaded_base;
9088 bool exc_return = false;
9089 bool is_load = extract32(insn, 20, 1);
9090 bool user = false;
39d5492a 9091 TCGv_i32 loaded_var;
9092 /* load/store multiple words */
9093 /* XXX: store correct base if write back */
9ee6e8bb 9094 if (insn & (1 << 22)) {
da3e53dd 9095 /* LDM (user), LDM (exception return) and STM (user) */
9096 if (IS_USER(s))
9097 goto illegal_op; /* only usable in supervisor mode */
9098
9099 if (is_load && extract32(insn, 15, 1)) {
9100 exc_return = true;
9101 } else {
9102 user = true;
9103 }
9104 }
9105 rn = (insn >> 16) & 0xf;
b0109805 9106 addr = load_reg(s, rn);
9107
9108 /* compute total size */
9109 loaded_base = 0;
f764718d 9110 loaded_var = NULL;
9111 n = 0;
9112 for(i=0;i<16;i++) {
9113 if (insn & (1 << i))
9114 n++;
9115 }
9116 /* XXX: test invalid n == 0 case ? */
9117 if (insn & (1 << 23)) {
9118 if (insn & (1 << 24)) {
9119 /* pre increment */
b0109805 9120 tcg_gen_addi_i32(addr, addr, 4);
9121 } else {
9122 /* post increment */
9123 }
9124 } else {
9125 if (insn & (1 << 24)) {
9126 /* pre decrement */
b0109805 9127 tcg_gen_addi_i32(addr, addr, -(n * 4));
9128 } else {
9129 /* post decrement */
9130 if (n != 1)
b0109805 9131 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9132 }
9133 }
9134 j = 0;
9135 for(i=0;i<16;i++) {
9136 if (insn & (1 << i)) {
da3e53dd 9137 if (is_load) {
9ee6e8bb 9138 /* load */
5a839c0d 9139 tmp = tcg_temp_new_i32();
12dcc321 9140 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9141 if (user) {
b75263d6 9142 tmp2 = tcg_const_i32(i);
1ce94f81 9143 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9144 tcg_temp_free_i32(tmp2);
7d1b0095 9145 tcg_temp_free_i32(tmp);
9ee6e8bb 9146 } else if (i == rn) {
b0109805 9147 loaded_var = tmp;
9ee6e8bb 9148 loaded_base = 1;
9d090d17 9149 } else if (i == 15 && exc_return) {
fb0e8e79 9150 store_pc_exc_ret(s, tmp);
9ee6e8bb 9151 } else {
7dcc1f89 9152 store_reg_from_load(s, i, tmp);
9153 }
9154 } else {
9155 /* store */
9156 if (i == 15) {
9157 /* special case: r15 = PC + 8 */
9158 val = (long)s->pc + 4; /* s->pc is already insn address + 4 */
7d1b0095 9159 tmp = tcg_temp_new_i32();
b0109805 9160 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9161 } else if (user) {
7d1b0095 9162 tmp = tcg_temp_new_i32();
b75263d6 9163 tmp2 = tcg_const_i32(i);
9ef39277 9164 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9165 tcg_temp_free_i32(tmp2);
9ee6e8bb 9166 } else {
b0109805 9167 tmp = load_reg(s, i);
9ee6e8bb 9168 }
12dcc321 9169 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9170 tcg_temp_free_i32(tmp);
9171 }
9172 j++;
9173 /* no need to add after the last transfer */
9174 if (j != n)
b0109805 9175 tcg_gen_addi_i32(addr, addr, 4);
9176 }
9177 }
9178 if (insn & (1 << 21)) {
9179 /* write back */
9180 if (insn & (1 << 23)) {
9181 if (insn & (1 << 24)) {
9182 /* pre increment */
9183 } else {
9184 /* post increment */
b0109805 9185 tcg_gen_addi_i32(addr, addr, 4);
9186 }
9187 } else {
9188 if (insn & (1 << 24)) {
9189 /* pre decrement */
9190 if (n != 1)
b0109805 9191 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9192 } else {
9193 /* post decrement */
b0109805 9194 tcg_gen_addi_i32(addr, addr, -(n * 4));
9195 }
9196 }
9197 store_reg(s, rn, addr);
9198 } else {
7d1b0095 9199 tcg_temp_free_i32(addr);
9200 }
9201 if (loaded_base) {
b0109805 9202 store_reg(s, rn, loaded_var);
9ee6e8bb 9203 }
da3e53dd 9204 if (exc_return) {
9ee6e8bb 9205 /* Restore CPSR from SPSR. */
d9ba4830 9206 tmp = load_cpu_field(spsr);
9207 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9208 gen_io_start();
9209 }
235ea1f5 9210 gen_helper_cpsr_write_eret(cpu_env, tmp);
9211 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9212 gen_io_end();
9213 }
7d1b0095 9214 tcg_temp_free_i32(tmp);
b29fd33d 9215 /* Must exit loop to check un-masked IRQs */
dcba3a8d 9216 s->base.is_jmp = DISAS_EXIT;
9217 }
9218 }
9219 break;
9220 case 0xa:
9221 case 0xb:
9222 {
9223 int32_t offset;
9224
9225 /* branch (and link) */
9226 val = (int32_t)s->pc;
9227 if (insn & (1 << 24)) {
7d1b0095 9228 tmp = tcg_temp_new_i32();
9229 tcg_gen_movi_i32(tmp, val);
9230 store_reg(s, 14, tmp);
9ee6e8bb 9231 }
9232 offset = sextract32(insn << 2, 0, 26);
9233 val += offset + 4;
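/* For illustration: insn << 2 puts imm24 at bits [25:0], so the
 * sextract32() above is the sign-extended byte offset; the extra 4
 * (on top of val = s->pc, itself insn address + 4) reproduces the
 * architectural PC of insn address + 8.
 */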
9234 gen_jmp(s, val);
9235 }
9236 break;
9237 case 0xc:
9238 case 0xd:
9239 case 0xe:
9240 if (((insn >> 8) & 0xe) == 10) {
9241 /* VFP. */
7dcc1f89 9242 if (disas_vfp_insn(s, insn)) {
9243 goto illegal_op;
9244 }
7dcc1f89 9245 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9246 /* Coprocessor. */
9ee6e8bb 9247 goto illegal_op;
6a57f3eb 9248 }
9249 break;
9250 case 0xf:
9251 /* swi */
eaed129d 9252 gen_set_pc_im(s, s->pc);
d4a2dc67 9253 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 9254 s->base.is_jmp = DISAS_SWI;
9255 break;
9256 default:
9257 illegal_op:
9258 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9259 default_exception_el(s));
9260 break;
9261 }
9262 }
9263}
9264
9265static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
9266{
9267 /* Return true if this is a 16 bit instruction. We must be precise
9268 * about this (matching the decode). We assume that s->pc still
9269 * points to the first 16 bits of the insn.
9270 */
9271 if ((insn >> 11) < 0x1d) {
9272 /* Definitely a 16-bit instruction */
9273 return true;
9274 }
9275
9276 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9277 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9278 * end up actually treating this as two 16-bit insns, though,
9279 * if it's half of a bl/blx pair that might span a page boundary.
9280 */
9281 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
9282 arm_dc_feature(s, ARM_FEATURE_M)) {
9283 /* Thumb2 cores (including all M profile ones) always treat
9284 * 32-bit insns as 32-bit.
9285 */
9286 return false;
9287 }
9288
bfe7ad5b 9289 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
9290 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9291 * is not on the next page; we merge this into a 32-bit
9292 * insn.
9293 */
9294 return false;
9295 }
9296 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9297 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9298 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
9299 * -- handle as single 16 bit insn
9300 */
9301 return true;
9302}
9303
9304/* Return true if this is a Thumb-2 logical op. */
9305static int
9306thumb2_logic_op(int op)
9307{
9308 return (op < 8);
9309}
9310
9311/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9312 then set condition code flags based on the result of the operation.
9313 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9314 to the high bit of T1.
9315 Returns zero if the opcode is valid. */
9316
9317static int
9318gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9319 TCGv_i32 t0, TCGv_i32 t1)
9320{
9321 int logic_cc;
9322
9323 logic_cc = 0;
9324 switch (op) {
9325 case 0: /* and */
396e467c 9326 tcg_gen_and_i32(t0, t0, t1);
9327 logic_cc = conds;
9328 break;
9329 case 1: /* bic */
f669df27 9330 tcg_gen_andc_i32(t0, t0, t1);
9331 logic_cc = conds;
9332 break;
9333 case 2: /* orr */
396e467c 9334 tcg_gen_or_i32(t0, t0, t1);
9335 logic_cc = conds;
9336 break;
9337 case 3: /* orn */
29501f1b 9338 tcg_gen_orc_i32(t0, t0, t1);
9339 logic_cc = conds;
9340 break;
9341 case 4: /* eor */
396e467c 9342 tcg_gen_xor_i32(t0, t0, t1);
9343 logic_cc = conds;
9344 break;
9345 case 8: /* add */
9346 if (conds)
72485ec4 9347 gen_add_CC(t0, t0, t1);
9ee6e8bb 9348 else
396e467c 9349 tcg_gen_add_i32(t0, t0, t1);
9350 break;
9351 case 10: /* adc */
9352 if (conds)
49b4c31e 9353 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9354 else
396e467c 9355 gen_adc(t0, t1);
9356 break;
9357 case 11: /* sbc */
9358 if (conds) {
9359 gen_sbc_CC(t0, t0, t1);
9360 } else {
396e467c 9361 gen_sub_carry(t0, t0, t1);
2de68a49 9362 }
9363 break;
9364 case 13: /* sub */
9365 if (conds)
72485ec4 9366 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9367 else
396e467c 9368 tcg_gen_sub_i32(t0, t0, t1);
9369 break;
9370 case 14: /* rsb */
9371 if (conds)
72485ec4 9372 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9373 else
396e467c 9374 tcg_gen_sub_i32(t0, t1, t0);
9375 break;
9376 default: /* 5, 6, 7, 9, 12, 15. */
9377 return 1;
9378 }
9379 if (logic_cc) {
396e467c 9380 gen_logic_CC(t0);
9ee6e8bb 9381 if (shifter_out)
396e467c 9382 gen_set_CF_bit31(t1);
9383 }
9384 return 0;
9385}
9386
9387/* Translate a 32-bit thumb instruction. */
9388static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 9389{
296e5a0a 9390 uint32_t imm, shift, offset;
9ee6e8bb 9391 uint32_t rd, rn, rm, rs;
9392 TCGv_i32 tmp;
9393 TCGv_i32 tmp2;
9394 TCGv_i32 tmp3;
9395 TCGv_i32 addr;
a7812ae4 9396 TCGv_i64 tmp64;
9397 int op;
9398 int shiftop;
9399 int conds;
9400 int logic_cc;
9401
9402 /*
9403 * ARMv6-M supports a limited subset of Thumb2 instructions.
9404 * Other Thumb1 architectures allow only 32-bit
9405 * combined BL/BLX prefix and suffix.
296e5a0a 9406 */
9407 if (arm_dc_feature(s, ARM_FEATURE_M) &&
9408 !arm_dc_feature(s, ARM_FEATURE_V7)) {
9409 int i;
9410 bool found = false;
9411 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
9412 0xf3b08040 /* dsb */,
9413 0xf3b08050 /* dmb */,
9414 0xf3b08060 /* isb */,
9415 0xf3e08000 /* mrs */,
9416 0xf000d000 /* bl */};
9417 static const uint32_t armv6m_mask[] = {0xffe0d000,
9418 0xfff0d0f0,
9419 0xfff0d0f0,
9420 0xfff0d0f0,
9421 0xffe0d000,
9422 0xf800d000};
9423
9424 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
9425 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
9426 found = true;
9427 break;
9428 }
9429 }
9430 if (!found) {
9431 goto illegal_op;
9432 }
9433 } else if ((insn & 0xf800e800) != 0xf000e800) {
9434 ARCH(6T2);
9435 }
9436
9437 rn = (insn >> 16) & 0xf;
9438 rs = (insn >> 12) & 0xf;
9439 rd = (insn >> 8) & 0xf;
9440 rm = insn & 0xf;
9441 switch ((insn >> 25) & 0xf) {
9442 case 0: case 1: case 2: case 3:
9443 /* 16-bit instructions. Should never happen. */
9444 abort();
9445 case 4:
9446 if (insn & (1 << 22)) {
9447 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9448 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 9449 * table branch, TT.
ebfe27c5 9450 */
9451 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
9452 arm_dc_feature(s, ARM_FEATURE_V8)) {
9453 /* 0b1110_1001_0111_1111_1110_1001_0111_1111
9454 * - SG (v8M only)
9455 * The bulk of the behaviour for this instruction is implemented
9456 * in v7m_handle_execute_nsc(), which deals with the insn when
9457 * it is executed by a CPU in non-secure state from memory
9458 * which is Secure & NonSecure-Callable.
9459 * Here we only need to handle the remaining cases:
9460 * * in NS memory (including the "security extension not
9461 * implemented" case) : NOP
9462 * * in S memory but CPU already secure (clear IT bits)
9463 * We know that the attribute for the memory this insn is
9464 * in must match the current CPU state, because otherwise
9465 * get_phys_addr_pmsav8 would have generated an exception.
9466 */
9467 if (s->v8m_secure) {
9468 /* Like the IT insn, we don't need to generate any code */
9469 s->condexec_cond = 0;
9470 s->condexec_mask = 0;
9471 }
9472 } else if (insn & 0x01200000) {
9473 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9474 * - load/store dual (post-indexed)
9475 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9476 * - load/store dual (literal and immediate)
9477 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9478 * - load/store dual (pre-indexed)
9479 */
9480 bool wback = extract32(insn, 21, 1);
9481
9ee6e8bb 9482 if (rn == 15) {
9483 if (insn & (1 << 21)) {
9484 /* UNPREDICTABLE */
9485 goto illegal_op;
9486 }
7d1b0095 9487 addr = tcg_temp_new_i32();
b0109805 9488 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9489 } else {
b0109805 9490 addr = load_reg(s, rn);
9491 }
9492 offset = (insn & 0xff) * 4;
910d7692 9493 if ((insn & (1 << 23)) == 0) {
9ee6e8bb 9494 offset = -offset;
9495 }
9496
9497 if (s->v8m_stackcheck && rn == 13 && wback) {
9498 /*
9499 * Here 'addr' is the current SP; if offset is +ve we're
9500 * moving SP up, else down. It is UNKNOWN whether the limit
9501 * check triggers when SP starts below the limit and ends
9502 * up above it; check whichever of the current and final
9503 * SP is lower, so QEMU will trigger in that situation.
9504 */
9505 if ((int32_t)offset < 0) {
9506 TCGv_i32 newsp = tcg_temp_new_i32();
9507
9508 tcg_gen_addi_i32(newsp, addr, offset);
9509 gen_helper_v8m_stackcheck(cpu_env, newsp);
9510 tcg_temp_free_i32(newsp);
9511 } else {
9512 gen_helper_v8m_stackcheck(cpu_env, addr);
9513 }
9514 }
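/* For illustration: with offset = -8 the limit check is applied to
 * the final SP (addr - 8); with offset = +8 it is applied to the
 * current SP, matching the "whichever is lower" policy above.
 */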
9515
9ee6e8bb 9516 if (insn & (1 << 24)) {
b0109805 9517 tcg_gen_addi_i32(addr, addr, offset);
9518 offset = 0;
9519 }
9520 if (insn & (1 << 20)) {
9521 /* ldrd */
e2592fad 9522 tmp = tcg_temp_new_i32();
12dcc321 9523 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9524 store_reg(s, rs, tmp);
9525 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9526 tmp = tcg_temp_new_i32();
12dcc321 9527 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9528 store_reg(s, rd, tmp);
9529 } else {
9530 /* strd */
b0109805 9531 tmp = load_reg(s, rs);
12dcc321 9532 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9533 tcg_temp_free_i32(tmp);
9534 tcg_gen_addi_i32(addr, addr, 4);
9535 tmp = load_reg(s, rd);
12dcc321 9536 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9537 tcg_temp_free_i32(tmp);
9ee6e8bb 9538 }
910d7692 9539 if (wback) {
9ee6e8bb 9540 /* Base writeback. */
9541 tcg_gen_addi_i32(addr, addr, offset - 4);
9542 store_reg(s, rn, addr);
9543 } else {
7d1b0095 9544 tcg_temp_free_i32(addr);
9545 }
9546 } else if ((insn & (1 << 23)) == 0) {
9547 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9548 * - load/store exclusive word
5158de24 9549 * - TT (v8M only)
9550 */
9551 if (rs == 15) {
9552 if (!(insn & (1 << 20)) &&
9553 arm_dc_feature(s, ARM_FEATURE_M) &&
9554 arm_dc_feature(s, ARM_FEATURE_V8)) {
9555 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
9556 * - TT (v8M only)
9557 */
9558 bool alt = insn & (1 << 7);
9559 TCGv_i32 addr, op, ttresp;
9560
9561 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
9562 /* we UNDEF for these UNPREDICTABLE cases */
9563 goto illegal_op;
9564 }
9565
9566 if (alt && !s->v8m_secure) {
9567 goto illegal_op;
9568 }
9569
9570 addr = load_reg(s, rn);
9571 op = tcg_const_i32(extract32(insn, 6, 2));
9572 ttresp = tcg_temp_new_i32();
9573 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
9574 tcg_temp_free_i32(addr);
9575 tcg_temp_free_i32(op);
9576 store_reg(s, rd, ttresp);
384c6c03 9577 break;
5158de24 9578 }
9579 goto illegal_op;
9580 }
39d5492a 9581 addr = tcg_temp_local_new_i32();
98a46317 9582 load_reg_var(s, addr, rn);
426f5abc 9583 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9584 if (insn & (1 << 20)) {
426f5abc 9585 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9586 } else {
426f5abc 9587 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9588 }
39d5492a 9589 tcg_temp_free_i32(addr);
2359bf80 9590 } else if ((insn & (7 << 5)) == 0) {
9591 /* Table Branch. */
9592 if (rn == 15) {
7d1b0095 9593 addr = tcg_temp_new_i32();
b0109805 9594 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9595 } else {
b0109805 9596 addr = load_reg(s, rn);
9ee6e8bb 9597 }
b26eefb6 9598 tmp = load_reg(s, rm);
b0109805 9599 tcg_gen_add_i32(addr, addr, tmp);
9600 if (insn & (1 << 4)) {
9601 /* tbh */
b0109805 9602 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9603 tcg_temp_free_i32(tmp);
e2592fad 9604 tmp = tcg_temp_new_i32();
12dcc321 9605 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9606 } else { /* tbb */
7d1b0095 9607 tcg_temp_free_i32(tmp);
e2592fad 9608 tmp = tcg_temp_new_i32();
12dcc321 9609 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9610 }
7d1b0095 9611 tcg_temp_free_i32(addr);
9612 tcg_gen_shli_i32(tmp, tmp, 1);
9613 tcg_gen_addi_i32(tmp, tmp, s->pc);
9614 store_reg(s, 15, tmp);
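/* For illustration: the loaded table entry is a halfword count,
 * so e.g. TBB [pc, r1] with table[r1] == 5 branches to pc + 10.
 */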
9ee6e8bb 9615 } else {
9616 bool is_lasr = false;
9617 bool is_ld = extract32(insn, 20, 1);
2359bf80 9618 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9619 op = (insn >> 4) & 0x3;
9620 switch (op2) {
9621 case 0:
426f5abc 9622 goto illegal_op;
9623 case 1:
9624 /* Load/store exclusive byte/halfword/doubleword */
9625 if (op == 2) {
9626 goto illegal_op;
9627 }
9628 ARCH(7);
9629 break;
9630 case 2:
9631 /* Load-acquire/store-release */
9632 if (op == 3) {
9633 goto illegal_op;
9634 }
9635 /* Fall through */
9636 case 3:
9637 /* Load-acquire/store-release exclusive */
9638 ARCH(8);
96c55295 9639 is_lasr = true;
2359bf80 9640 break;
426f5abc 9641 }
9642
9643 if (is_lasr && !is_ld) {
9644 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9645 }
9646
39d5492a 9647 addr = tcg_temp_local_new_i32();
98a46317 9648 load_reg_var(s, addr, rn);
2359bf80 9649 if (!(op2 & 1)) {
96c55295 9650 if (is_ld) {
9651 tmp = tcg_temp_new_i32();
9652 switch (op) {
9653 case 0: /* ldab */
9654 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9655 rs | ISSIsAcqRel);
9656 break;
9657 case 1: /* ldah */
9658 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9659 rs | ISSIsAcqRel);
9660 break;
9661 case 2: /* lda */
9662 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9663 rs | ISSIsAcqRel);
9664 break;
9665 default:
9666 abort();
9667 }
9668 store_reg(s, rs, tmp);
9669 } else {
9670 tmp = load_reg(s, rs);
9671 switch (op) {
9672 case 0: /* stlb */
9673 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9674 rs | ISSIsAcqRel);
9675 break;
9676 case 1: /* stlh */
9677 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9678 rs | ISSIsAcqRel);
9679 break;
9680 case 2: /* stl */
9681 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9682 rs | ISSIsAcqRel);
9683 break;
9684 default:
9685 abort();
9686 }
9687 tcg_temp_free_i32(tmp);
9688 }
96c55295 9689 } else if (is_ld) {
426f5abc 9690 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9691 } else {
426f5abc 9692 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9693 }
39d5492a 9694 tcg_temp_free_i32(addr);
9695
9696 if (is_lasr && is_ld) {
9697 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
9698 }
9699 }
9700 } else {
9701 /* Load/store multiple, RFE, SRS. */
9702 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9703 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9704 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9705 goto illegal_op;
00115976 9706 }
9ee6e8bb
PB
9707 if (insn & (1 << 20)) {
9708 /* rfe */
9709 addr = load_reg(s, rn);
9710 if ((insn & (1 << 24)) == 0)
9711 tcg_gen_addi_i32(addr, addr, -8);
9712 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9713 tmp = tcg_temp_new_i32();
12dcc321 9714 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9715 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9716 tmp2 = tcg_temp_new_i32();
12dcc321 9717 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9718 if (insn & (1 << 21)) {
9719 /* Base writeback. */
9720 if (insn & (1 << 24)) {
9721 tcg_gen_addi_i32(addr, addr, 4);
9722 } else {
9723 tcg_gen_addi_i32(addr, addr, -4);
9724 }
9725 store_reg(s, rn, addr);
9726 } else {
7d1b0095 9727 tcg_temp_free_i32(addr);
9ee6e8bb 9728 }
b0109805 9729 gen_rfe(s, tmp, tmp2);
9730 } else {
9731 /* srs */
9732 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9733 insn & (1 << 21));
9734 }
9735 } else {
5856d44e 9736 int i, loaded_base = 0;
39d5492a 9737 TCGv_i32 loaded_var;
7c0ed88e 9738 bool wback = extract32(insn, 21, 1);
9ee6e8bb 9739 /* Load/store multiple. */
b0109805 9740 addr = load_reg(s, rn);
9741 offset = 0;
9742 for (i = 0; i < 16; i++) {
9743 if (insn & (1 << i))
9744 offset += 4;
9745 }
7c0ed88e 9746
9ee6e8bb 9747 if (insn & (1 << 24)) {
b0109805 9748 tcg_gen_addi_i32(addr, addr, -offset);
9749 }
9750
9751 if (s->v8m_stackcheck && rn == 13 && wback) {
9752 /*
9753 * If the writeback is incrementing SP rather than
9754 * decrementing it, and the initial SP is below the
9755 * stack limit but the final written-back SP would
9756 * be above, then we must not perform any memory
9757 * accesses, but it is IMPDEF whether we generate
9758 * an exception. We choose to do so in this case.
9759 * At this point 'addr' is the lowest address, so
9760 * either the original SP (if incrementing) or our
9761 * final SP (if decrementing), so that's what we check.
9762 */
9763 gen_helper_v8m_stackcheck(cpu_env, addr);
9764 }
9765
f764718d 9766 loaded_var = NULL;
9767 for (i = 0; i < 16; i++) {
9768 if ((insn & (1 << i)) == 0)
9769 continue;
9770 if (insn & (1 << 20)) {
9771 /* Load. */
e2592fad 9772 tmp = tcg_temp_new_i32();
12dcc321 9773 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9774 if (i == 15) {
3bb8a96f 9775 gen_bx_excret(s, tmp);
9776 } else if (i == rn) {
9777 loaded_var = tmp;
9778 loaded_base = 1;
9ee6e8bb 9779 } else {
b0109805 9780 store_reg(s, i, tmp);
9781 }
9782 } else {
9783 /* Store. */
b0109805 9784 tmp = load_reg(s, i);
12dcc321 9785 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9786 tcg_temp_free_i32(tmp);
9ee6e8bb 9787 }
b0109805 9788 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9789 }
9790 if (loaded_base) {
9791 store_reg(s, rn, loaded_var);
9792 }
7c0ed88e 9793 if (wback) {
9794 /* Base register writeback. */
9795 if (insn & (1 << 24)) {
b0109805 9796 tcg_gen_addi_i32(addr, addr, -offset);
9797 }
9798 /* Fault if writeback register is in register list. */
9799 if (insn & (1 << rn))
9800 goto illegal_op;
9801 store_reg(s, rn, addr);
9802 } else {
7d1b0095 9803 tcg_temp_free_i32(addr);
9804 }
9805 }
9806 }
9807 break;
9808 case 5:
9809
9ee6e8bb 9810 op = (insn >> 21) & 0xf;
2af9ab77 9811 if (op == 6) {
9812 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9813 goto illegal_op;
9814 }
9815 /* Halfword pack. */
9816 tmp = load_reg(s, rn);
9817 tmp2 = load_reg(s, rm);
9818 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9819 if (insn & (1 << 5)) {
9820 /* pkhtb */
9821 if (shift == 0)
9822 shift = 31;
9823 tcg_gen_sari_i32(tmp2, tmp2, shift);
9824 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9825 tcg_gen_ext16u_i32(tmp2, tmp2);
9826 } else {
9827 /* pkhbt */
9828 if (shift)
9829 tcg_gen_shli_i32(tmp2, tmp2, shift);
9830 tcg_gen_ext16u_i32(tmp, tmp);
9831 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9832 }
9833 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9834 tcg_temp_free_i32(tmp2);
9835 store_reg(s, rd, tmp);
9836 } else {
9837 /* Data processing register constant shift. */
9838 if (rn == 15) {
7d1b0095 9839 tmp = tcg_temp_new_i32();
9840 tcg_gen_movi_i32(tmp, 0);
9841 } else {
9842 tmp = load_reg(s, rn);
9843 }
9844 tmp2 = load_reg(s, rm);
9845
9846 shiftop = (insn >> 4) & 3;
9847 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9848 conds = (insn & (1 << 20)) != 0;
9849 logic_cc = (conds && thumb2_logic_op(op));
9850 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9851 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9852 goto illegal_op;
7d1b0095 9853 tcg_temp_free_i32(tmp2);
9854 if (rd == 13 &&
9855 ((op == 2 && rn == 15) ||
9856 (op == 8 && rn == 13) ||
9857 (op == 13 && rn == 13))) {
9858 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
9859 store_sp_checked(s, tmp);
9860 } else if (rd != 15) {
9861 store_reg(s, rd, tmp);
9862 } else {
7d1b0095 9863 tcg_temp_free_i32(tmp);
2af9ab77 9864 }
3174f8e9 9865 }
9866 break;
9867 case 13: /* Misc data processing. */
9868 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9869 if (op < 4 && (insn & 0xf000) != 0xf000)
9870 goto illegal_op;
9871 switch (op) {
9872 case 0: /* Register controlled shift. */
9873 tmp = load_reg(s, rn);
9874 tmp2 = load_reg(s, rm);
9875 if ((insn & 0x70) != 0)
9876 goto illegal_op;
9877 /*
9878 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
9879 * - MOV, MOVS (register-shifted register), flagsetting
9880 */
9ee6e8bb 9881 op = (insn >> 21) & 3;
9882 logic_cc = (insn & (1 << 20)) != 0;
9883 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9884 if (logic_cc)
9885 gen_logic_CC(tmp);
bedb8a6b 9886 store_reg(s, rd, tmp);
9887 break;
9888 case 1: /* Sign/zero extend. */
9889 op = (insn >> 20) & 7;
9890 switch (op) {
9891 case 0: /* SXTAH, SXTH */
9892 case 1: /* UXTAH, UXTH */
9893 case 4: /* SXTAB, SXTB */
9894 case 5: /* UXTAB, UXTB */
9895 break;
9896 case 2: /* SXTAB16, SXTB16 */
9897 case 3: /* UXTAB16, UXTB16 */
9898 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9899 goto illegal_op;
9900 }
9901 break;
9902 default:
9903 goto illegal_op;
9904 }
9905 if (rn != 15) {
9906 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9907 goto illegal_op;
9908 }
9909 }
5e3f878a 9910 tmp = load_reg(s, rm);
9ee6e8bb 9911 shift = (insn >> 4) & 3;
1301f322 9912 /* ??? In many cases it's not necessary to do a
9913 rotate, a shift is sufficient. */
9914 if (shift != 0)
f669df27 9915 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9916 op = (insn >> 20) & 7;
9917 switch (op) {
9918 case 0: gen_sxth(tmp); break;
9919 case 1: gen_uxth(tmp); break;
9920 case 2: gen_sxtb16(tmp); break;
9921 case 3: gen_uxtb16(tmp); break;
9922 case 4: gen_sxtb(tmp); break;
9923 case 5: gen_uxtb(tmp); break;
9924 default:
9925 g_assert_not_reached();
9926 }
9927 if (rn != 15) {
5e3f878a 9928 tmp2 = load_reg(s, rn);
9ee6e8bb 9929 if ((op >> 1) == 1) {
5e3f878a 9930 gen_add16(tmp, tmp2);
9ee6e8bb 9931 } else {
5e3f878a 9932 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9933 tcg_temp_free_i32(tmp2);
9934 }
9935 }
5e3f878a 9936 store_reg(s, rd, tmp);
9937 break;
9938 case 2: /* SIMD add/subtract. */
9939 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9940 goto illegal_op;
9941 }
9942 op = (insn >> 20) & 7;
9943 shift = (insn >> 4) & 7;
9944 if ((op & 3) == 3 || (shift & 3) == 3)
9945 goto illegal_op;
9946 tmp = load_reg(s, rn);
9947 tmp2 = load_reg(s, rm);
9948 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9949 tcg_temp_free_i32(tmp2);
6ddbc6e4 9950 store_reg(s, rd, tmp);
9951 break;
9952 case 3: /* Other data processing. */
9953 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9954 if (op < 4) {
9955 /* Saturating add/subtract. */
9956 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9957 goto illegal_op;
9958 }
9959 tmp = load_reg(s, rn);
9960 tmp2 = load_reg(s, rm);
9ee6e8bb 9961 if (op & 1)
9ef39277 9962 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9963 if (op & 2)
9ef39277 9964 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9965 else
9ef39277 9966 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9967 tcg_temp_free_i32(tmp2);
9ee6e8bb 9968 } else {
9969 switch (op) {
9970 case 0x0a: /* rbit */
9971 case 0x08: /* rev */
9972 case 0x09: /* rev16 */
9973 case 0x0b: /* revsh */
9974 case 0x18: /* clz */
9975 break;
9976 case 0x10: /* sel */
9977 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9978 goto illegal_op;
9979 }
9980 break;
9981 case 0x20: /* crc32/crc32c */
9982 case 0x21:
9983 case 0x22:
9984 case 0x28:
9985 case 0x29:
9986 case 0x2a:
962fcbf2 9987 if (!dc_isar_feature(aa32_crc32, s)) {
9988 goto illegal_op;
9989 }
9990 break;
9991 default:
9992 goto illegal_op;
9993 }
d9ba4830 9994 tmp = load_reg(s, rn);
9995 switch (op) {
9996 case 0x0a: /* rbit */
d9ba4830 9997 gen_helper_rbit(tmp, tmp);
9998 break;
9999 case 0x08: /* rev */
66896cb8 10000 tcg_gen_bswap32_i32(tmp, tmp);
10001 break;
10002 case 0x09: /* rev16 */
d9ba4830 10003 gen_rev16(tmp);
10004 break;
10005 case 0x0b: /* revsh */
d9ba4830 10006 gen_revsh(tmp);
10007 break;
10008 case 0x10: /* sel */
d9ba4830 10009 tmp2 = load_reg(s, rm);
7d1b0095 10010 tmp3 = tcg_temp_new_i32();
0ecb72a5 10011 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10012 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
10013 tcg_temp_free_i32(tmp3);
10014 tcg_temp_free_i32(tmp2);
10015 break;
10016 case 0x18: /* clz */
7539a012 10017 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10018 break;
10019 case 0x20:
10020 case 0x21:
10021 case 0x22:
10022 case 0x28:
10023 case 0x29:
10024 case 0x2a:
10025 {
10026 /* crc32/crc32c */
10027 uint32_t sz = op & 0x3;
10028 uint32_t c = op & 0x8;
10029
eb0ecd5a 10030 tmp2 = load_reg(s, rm);
10031 if (sz == 0) {
10032 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10033 } else if (sz == 1) {
10034 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10035 }
10036 tmp3 = tcg_const_i32(1 << sz);
10037 if (c) {
10038 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10039 } else {
10040 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10041 }
10042 tcg_temp_free_i32(tmp2);
10043 tcg_temp_free_i32(tmp3);
10044 break;
10045 }
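/* For illustration: sz selects how much of Rm is consumed per insn
 * (1 << sz = 1, 2 or 4 bytes) and c selects the CRC32C (Castagnoli)
 * polynomial rather than the IEEE 802.3 one used by plain CRC32.
 */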
9ee6e8bb 10046 default:
62b44f05 10047 g_assert_not_reached();
10048 }
10049 }
d9ba4830 10050 store_reg(s, rd, tmp);
10051 break;
10052 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10053 switch ((insn >> 20) & 7) {
10054 case 0: /* 32 x 32 -> 32 */
10055 case 7: /* Unsigned sum of absolute differences. */
10056 break;
10057 case 1: /* 16 x 16 -> 32 */
10058 case 2: /* Dual multiply add. */
10059 case 3: /* 32 * 16 -> 32msb */
10060 case 4: /* Dual multiply subtract. */
10061 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10062 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10063 goto illegal_op;
10064 }
10065 break;
10066 }
9ee6e8bb 10067 op = (insn >> 4) & 0xf;
10068 tmp = load_reg(s, rn);
10069 tmp2 = load_reg(s, rm);
10070 switch ((insn >> 20) & 7) {
10071 case 0: /* 32 x 32 -> 32 */
d9ba4830 10072 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10073 tcg_temp_free_i32(tmp2);
9ee6e8bb 10074 if (rs != 15) {
d9ba4830 10075 tmp2 = load_reg(s, rs);
9ee6e8bb 10076 if (op)
d9ba4830 10077 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10078 else
d9ba4830 10079 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10080 tcg_temp_free_i32(tmp2);
9ee6e8bb 10081 }
10082 break;
10083 case 1: /* 16 x 16 -> 32 */
d9ba4830 10084 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10085 tcg_temp_free_i32(tmp2);
9ee6e8bb 10086 if (rs != 15) {
d9ba4830 10087 tmp2 = load_reg(s, rs);
9ef39277 10088 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10089 tcg_temp_free_i32(tmp2);
9ee6e8bb 10090 }
10091 break;
10092 case 2: /* Dual multiply add. */
10093 case 4: /* Dual multiply subtract. */
10094 if (op)
10095 gen_swap_half(tmp2);
10096 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10097 if (insn & (1 << 22)) {
e1d177b9 10098 /* This subtraction cannot overflow. */
d9ba4830 10099 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10100 } else {
10101 /* This addition cannot overflow 32 bits;
10102 * however it may overflow considered as a signed
10103 * operation, in which case we must set the Q flag.
10104 */
9ef39277 10105 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10106 }
7d1b0095 10107 tcg_temp_free_i32(tmp2);
10108 if (rs != 15)
10109 {
d9ba4830 10110 tmp2 = load_reg(s, rs);
9ef39277 10111 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10112 tcg_temp_free_i32(tmp2);
9ee6e8bb 10113 }
10114 break;
10115 case 3: /* 32 * 16 -> 32msb */
10116 if (op)
d9ba4830 10117 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10118 else
d9ba4830 10119 gen_sxth(tmp2);
10120 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10121 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10122 tmp = tcg_temp_new_i32();
ecc7b3aa 10123 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10124 tcg_temp_free_i64(tmp64);
10125 if (rs != 15)
10126 {
d9ba4830 10127 tmp2 = load_reg(s, rs);
9ef39277 10128 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10129 tcg_temp_free_i32(tmp2);
9ee6e8bb 10130 }
9ee6e8bb 10131 break;
10132 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10133 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10134 if (rs != 15) {
10135 tmp = load_reg(s, rs);
10136 if (insn & (1 << 20)) {
10137 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10138 } else {
838fa72d 10139 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10140 }
2c0262af 10141 }
10142 if (insn & (1 << 4)) {
10143 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10144 }
10145 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10146 tmp = tcg_temp_new_i32();
ecc7b3aa 10147 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10148 tcg_temp_free_i64(tmp64);
10149 break;
10150 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10151 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10152 tcg_temp_free_i32(tmp2);
9ee6e8bb 10153 if (rs != 15) {
10154 tmp2 = load_reg(s, rs);
10155 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10156 tcg_temp_free_i32(tmp2);
5fd46862 10157 }
9ee6e8bb 10158 break;
2c0262af 10159 }
d9ba4830 10160 store_reg(s, rd, tmp);
2c0262af 10161 break;
10162 case 6: case 7: /* 64-bit multiply, Divide. */
10163 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
10164 tmp = load_reg(s, rn);
10165 tmp2 = load_reg(s, rm);
10166 if ((op & 0x50) == 0x10) {
10167 /* sdiv, udiv */
7e0cf8b4 10168 if (!dc_isar_feature(thumb_div, s)) {
9ee6e8bb 10169 goto illegal_op;
47789990 10170 }
9ee6e8bb 10171 if (op & 0x20)
5e3f878a 10172 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10173 else
5e3f878a 10174 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10175 tcg_temp_free_i32(tmp2);
5e3f878a 10176 store_reg(s, rd, tmp);
10177 } else if ((op & 0xe) == 0xc) {
10178 /* Dual multiply accumulate long. */
10179 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10180 tcg_temp_free_i32(tmp);
10181 tcg_temp_free_i32(tmp2);
10182 goto illegal_op;
10183 }
9ee6e8bb 10184 if (op & 1)
10185 gen_swap_half(tmp2);
10186 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10187 if (op & 0x10) {
5e3f878a 10188 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10189 } else {
5e3f878a 10190 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10191 }
7d1b0095 10192 tcg_temp_free_i32(tmp2);
10193 /* BUGFIX */
10194 tmp64 = tcg_temp_new_i64();
10195 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10196 tcg_temp_free_i32(tmp);
10197 gen_addq(s, tmp64, rs, rd);
10198 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10199 tcg_temp_free_i64(tmp64);
2c0262af 10200 } else {
10201 if (op & 0x20) {
10202 /* Unsigned 64-bit multiply */
a7812ae4 10203 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10204 } else {
9ee6e8bb
PB
10205 if (op & 8) {
10206 /* smlalxy */
10207 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10208 tcg_temp_free_i32(tmp2);
10209 tcg_temp_free_i32(tmp);
10210 goto illegal_op;
10211 }
5e3f878a 10212 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10213 tcg_temp_free_i32(tmp2);
10214 tmp64 = tcg_temp_new_i64();
10215 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10216 tcg_temp_free_i32(tmp);
10217 } else {
10218 /* Signed 64-bit multiply */
a7812ae4 10219 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10220 }
b5ff1b31 10221 }
10222 if (op & 4) {
10223 /* umaal */
10224 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10225 tcg_temp_free_i64(tmp64);
10226 goto illegal_op;
10227 }
10228 gen_addq_lo(s, tmp64, rs);
10229 gen_addq_lo(s, tmp64, rd);
10230 } else if (op & 0x40) {
10231 /* 64-bit accumulate. */
a7812ae4 10232 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10233 }
a7812ae4 10234 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10235 tcg_temp_free_i64(tmp64);
5fd46862 10236 }
2c0262af 10237 break;
10238 }
10239 break;
10240 case 6: case 7: case 14: case 15:
10241 /* Coprocessor. */
7517748e 10242 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10243 /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
10244 if (extract32(insn, 24, 2) == 3) {
10245 goto illegal_op; /* op0 = 0b11 : unallocated */
10246 }
10247
10248 /*
10249 * Decode VLLDM and VLSTM first: these are nonstandard because:
10250 * * if there is no FPU then these insns must NOP in
10251 * Secure state and UNDEF in Nonsecure state
10252 * * if there is an FPU then these insns do not have
10253 * the usual behaviour that disas_vfp_insn() provides of
10254 * being controlled by CPACR/NSACR enable bits or the
10255 * lazy-stacking logic.
7517748e 10256 */
10257 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10258 (insn & 0xffa00f00) == 0xec200a00) {
10259 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
10260 * - VLLDM, VLSTM
10261 * We choose to UNDEF if the RAZ bits are non-zero.
10262 */
10263 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
10264 goto illegal_op;
10265 }
10266
10267 if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
10268 TCGv_i32 fptr = load_reg(s, rn);
10269
10270 if (extract32(insn, 20, 1)) {
956fe143 10271 gen_helper_v7m_vlldm(cpu_env, fptr);
10272 } else {
10273 gen_helper_v7m_vlstm(cpu_env, fptr);
10274 }
10275 tcg_temp_free_i32(fptr);
10276
10277 /* End the TB, because we have updated FP control bits */
10278 s->base.is_jmp = DISAS_UPDATE;
10279 }
10280 break;
10281 }
10282 if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
10283 ((insn >> 8) & 0xe) == 10) {
10284 /* FP, and the CPU supports it */
10285 if (disas_vfp_insn(s, insn)) {
10286 goto illegal_op;
10287 }
10288 break;
10289 }
10290
b1e5336a 10291 /* All other insns: NOCP */
10292 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10293 default_exception_el(s));
10294 break;
10295 }
10296 if ((insn & 0xfe000a00) == 0xfc000800
10297 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10298 /* The Thumb2 and ARM encodings are identical. */
10299 if (disas_neon_insn_3same_ext(s, insn)) {
10300 goto illegal_op;
10301 }
10302 } else if ((insn & 0xff000a00) == 0xfe000800
10303 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10304 /* The Thumb2 and ARM encodings are identical. */
10305 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
10306 goto illegal_op;
10307 }
10308 } else if (((insn >> 24) & 3) == 3) {
9ee6e8bb 10309 /* Translate into the equivalent ARM encoding. */
f06053e3 10310 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10311 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10312 goto illegal_op;
7dcc1f89 10313 }
6a57f3eb 10314 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10315 if (disas_vfp_insn(s, insn)) {
10316 goto illegal_op;
10317 }
10318 } else {
10319 if (insn & (1 << 28))
10320 goto illegal_op;
7dcc1f89 10321 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10322 goto illegal_op;
7dcc1f89 10323 }
10324 }
10325 break;
10326 case 8: case 9: case 10: case 11:
10327 if (insn & (1 << 15)) {
10328 /* Branches, misc control. */
10329 if (insn & 0x5000) {
10330 /* Unconditional branch. */
10331 /* signextend(hw1[10:0]) -> offset[:12]. */
10332 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10333 /* hw1[10:0] -> offset[11:1]. */
10334 offset |= (insn & 0x7ff) << 1;
10335 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10336 offset[24:22] already have the same value because of the
10337 sign extension above. */
10338 offset ^= ((~insn) & (1 << 13)) << 10;
10339 offset ^= ((~insn) & (1 << 11)) << 11;
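/* For illustration: the two XORs implement I1 = NOT(J1 EOR S) and
 * I2 = NOT(J2 EOR S) from the T4 encoding. e.g. hw1 = 0xf000,
 * hw2 = 0xf800 has S = 0, J1 = J2 = 1 and all imm bits clear, so
 * offset folds to 0: a BL to the immediately following insn.
 */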
10340
10341 if (insn & (1 << 14)) {
10342 /* Branch and link. */
3174f8e9 10343 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10344 }
3b46e624 10345
b0109805 10346 offset += s->pc;
10347 if (insn & (1 << 12)) {
10348 /* b/bl */
b0109805 10349 gen_jmp(s, offset);
10350 } else {
10351 /* blx */
b0109805 10352 offset &= ~(uint32_t)2;
be5e7a76 10353 /* thumb2 bx, no need to check */
b0109805 10354 gen_bx_im(s, offset);
2c0262af 10355 }
10356 } else if (((insn >> 23) & 7) == 7) {
10357 /* Misc control */
10358 if (insn & (1 << 13))
10359 goto illegal_op;
10360
10361 if (insn & (1 << 26)) {
10362 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10363 goto illegal_op;
10364 }
10365 if (!(insn & (1 << 20))) {
10366 /* Hypervisor call (v7) */
10367 int imm16 = extract32(insn, 16, 4) << 12
10368 | extract32(insn, 0, 12);
10369 ARCH(7);
10370 if (IS_USER(s)) {
10371 goto illegal_op;
10372 }
10373 gen_hvc(s, imm16);
10374 } else {
10375 /* Secure monitor call (v6+) */
10376 ARCH(6K);
10377 if (IS_USER(s)) {
10378 goto illegal_op;
10379 }
10380 gen_smc(s);
10381 }
2c0262af 10382 } else {
10383 op = (insn >> 20) & 7;
10384 switch (op) {
10385 case 0: /* msr cpsr. */
b53d8923 10386 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 10387 tmp = load_reg(s, rn);
10388 /* the constant is the mask and SYSm fields */
10389 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 10390 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10391 tcg_temp_free_i32(addr);
7d1b0095 10392 tcg_temp_free_i32(tmp);
10393 gen_lookup_tb(s);
10394 break;
10395 }
10396 /* fall through */
10397 case 1: /* msr spsr. */
b53d8923 10398 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10399 goto illegal_op;
b53d8923 10400 }
10401
10402 if (extract32(insn, 5, 1)) {
10403 /* MSR (banked) */
10404 int sysm = extract32(insn, 8, 4) |
10405 (extract32(insn, 4, 1) << 4);
10406 int r = op & 1;
10407
10408 gen_msr_banked(s, r, sysm, rm);
10409 break;
10410 }
10411
10412 /* MSR (for PSRs) */
10413 tmp = load_reg(s, rn);
10414 if (gen_set_psr(s,
7dcc1f89 10415 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10416 op == 1, tmp))
10417 goto illegal_op;
10418 break;
10419 case 2: /* cps, nop-hint. */
10420 if (((insn >> 8) & 7) == 0) {
10421 gen_nop_hint(s, insn & 0xff);
10422 }
10423 /* Implemented as NOP in user mode. */
10424 if (IS_USER(s))
10425 break;
10426 offset = 0;
10427 imm = 0;
10428 if (insn & (1 << 10)) {
10429 if (insn & (1 << 7))
10430 offset |= CPSR_A;
10431 if (insn & (1 << 6))
10432 offset |= CPSR_I;
10433 if (insn & (1 << 5))
10434 offset |= CPSR_F;
10435 if (insn & (1 << 9))
10436 imm = CPSR_A | CPSR_I | CPSR_F;
10437 }
10438 if (insn & (1 << 8)) {
10439 offset |= 0x1f;
10440 imm |= (insn & 0x1f);
10441 }
10442 if (offset) {
2fbac54b 10443 gen_set_psr_im(s, offset, 0, imm);
10444 }
10445 break;
10446 case 3: /* Special control operations. */
14120108 10447 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
8297cb13 10448 !arm_dc_feature(s, ARM_FEATURE_M)) {
10449 goto illegal_op;
10450 }
10451 op = (insn >> 4) & 0xf;
10452 switch (op) {
10453 case 2: /* clrex */
426f5abc 10454 gen_clrex(s);
10455 break;
10456 case 4: /* dsb */
10457 case 5: /* dmb */
61e4c432 10458 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10459 break;
10460 case 6: /* isb */
10461 /* We need to break the TB after this insn
10462 * to execute self-modifying code correctly
10463 * and also to take any pending interrupts
10464 * immediately.
10465 */
0b609cc1 10466 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 10467 break;
10468 case 7: /* sb */
10469 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
10470 goto illegal_op;
10471 }
10472 /*
10473 * TODO: There is no speculation barrier opcode
10474 * for TCG; MB and end the TB instead.
10475 */
10476 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10477 gen_goto_tb(s, 0, s->pc & ~1);
10478 break;
10479 default:
10480 goto illegal_op;
10481 }
10482 break;
10483 case 4: /* bxj */
10484 /* Trivial implementation equivalent to bx.
10485 * This instruction doesn't exist at all for M-profile.
10486 */
10487 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10488 goto illegal_op;
10489 }
10490 tmp = load_reg(s, rn);
10491 gen_bx(s, tmp);
10492 break;
10493 case 5: /* Exception return. */
10494 if (IS_USER(s)) {
10495 goto illegal_op;
10496 }
10497 if (rn != 14 || rd != 15) {
10498 goto illegal_op;
10499 }
10500 if (s->current_el == 2) {
10501 /* ERET from Hyp uses ELR_Hyp, not LR */
10502 if (insn & 0xff) {
10503 goto illegal_op;
10504 }
10505 tmp = load_cpu_field(elr_el[2]);
10506 } else {
10507 tmp = load_reg(s, rn);
10508 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10509 }
10510 gen_exception_return(s, tmp);
10511 break;
8bfd0550 10512 case 6: /* MRS */
10513 if (extract32(insn, 5, 1) &&
10514 !arm_dc_feature(s, ARM_FEATURE_M)) {
10515 /* MRS (banked) */
10516 int sysm = extract32(insn, 16, 4) |
10517 (extract32(insn, 4, 1) << 4);
10518
10519 gen_mrs_banked(s, 0, sysm, rd);
10520 break;
10521 }
10522
10523 if (extract32(insn, 16, 4) != 0xf) {
10524 goto illegal_op;
10525 }
10526 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10527 extract32(insn, 0, 8) != 0) {
10528 goto illegal_op;
10529 }
10530
8bfd0550 10531 /* mrs cpsr */
7d1b0095 10532 tmp = tcg_temp_new_i32();
b53d8923 10533 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10534 addr = tcg_const_i32(insn & 0xff);
10535 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10536 tcg_temp_free_i32(addr);
9ee6e8bb 10537 } else {
9ef39277 10538 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10539 }
8984bd2e 10540 store_reg(s, rd, tmp);
9ee6e8bb 10541 break;
8bfd0550 10542 case 7: /* MRS */
10543 if (extract32(insn, 5, 1) &&
10544 !arm_dc_feature(s, ARM_FEATURE_M)) {
10545 /* MRS (banked) */
10546 int sysm = extract32(insn, 16, 4) |
10547 (extract32(insn, 4, 1) << 4);
10548
10549 gen_mrs_banked(s, 1, sysm, rd);
10550 break;
10551 }
10552
10553 /* mrs spsr. */
9ee6e8bb 10554 /* Not accessible in user mode. */
b53d8923 10555 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10556 goto illegal_op;
b53d8923 10557 }
10558
10559 if (extract32(insn, 16, 4) != 0xf ||
10560 extract32(insn, 0, 8) != 0) {
10561 goto illegal_op;
10562 }
10563
10564 tmp = load_cpu_field(spsr);
10565 store_reg(s, rd, tmp);
9ee6e8bb 10566 break;
10567 }
10568 }
10569 } else {
10570 /* Conditional branch. */
10571 op = (insn >> 22) & 0xf;
10572 /* Generate a conditional jump to next instruction. */
c2d9644e 10573 arm_skip_unless(s, op);
10574
10575 /* offset[11:1] = insn[10:0] */
10576 offset = (insn & 0x7ff) << 1;
10577 /* offset[17:12] = insn[21:16]. */
10578 offset |= (insn & 0x003f0000) >> 4;
10579 /* offset[31:20] = insn[26]. */
10580 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10581 /* offset[18] = insn[13]. */
10582 offset |= (insn & (1 << 13)) << 5;
10583 /* offset[19] = insn[11]. */
10584 offset |= (insn & (1 << 11)) << 8;
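/* i.e. imm32 = SignExtend(S:J2:J1:imm6:imm11:0), the T3 encoding
 * of a conditional branch, giving a branch range of +-1MB. */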
10585
10586 /* jump to the offset */
b0109805 10587 gen_jmp(s, s->pc + offset);
10588 }
10589 } else {
10590 /*
10591 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
10592 * - Data-processing (modified immediate, plain binary immediate)
10593 */
9ee6e8bb 10594 if (insn & (1 << 25)) {
10595 /*
10596 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
10597 * - Data-processing (plain binary immediate)
10598 */
10599 if (insn & (1 << 24)) {
10600 if (insn & (1 << 20))
10601 goto illegal_op;
10602 /* Bitfield/Saturate. */
10603 op = (insn >> 21) & 7;
10604 imm = insn & 0x1f;
10605 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10606 if (rn == 15) {
7d1b0095 10607 tmp = tcg_temp_new_i32();
10608 tcg_gen_movi_i32(tmp, 0);
10609 } else {
10610 tmp = load_reg(s, rn);
10611 }
10612 switch (op) {
10613 case 2: /* Signed bitfield extract. */
10614 imm++;
10615 if (shift + imm > 32)
10616 goto illegal_op;
10617 if (imm < 32) {
10618 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10619 }
10620 break;
10621 case 6: /* Unsigned bitfield extract. */
10622 imm++;
10623 if (shift + imm > 32)
10624 goto illegal_op;
10625 if (imm < 32) {
10626 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10627 }
10628 break;
10629 case 3: /* Bitfield insert/clear. */
10630 if (imm < shift)
10631 goto illegal_op;
10632 imm = imm + 1 - shift;
10633 if (imm != 32) {
6ddbc6e4 10634 tmp2 = load_reg(s, rd);
d593c48e 10635 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10636 tcg_temp_free_i32(tmp2);
10637 }
10638 break;
10639 case 7:
10640 goto illegal_op;
10641 default: /* Saturate. */
10642 if (shift) {
10643 if (op & 1)
6ddbc6e4 10644 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10645 else
6ddbc6e4 10646 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10647 }
6ddbc6e4 10648 tmp2 = tcg_const_i32(imm);
10649 if (op & 4) {
10650 /* Unsigned. */
10651 if ((op & 1) && shift == 0) {
10652 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10653 tcg_temp_free_i32(tmp);
10654 tcg_temp_free_i32(tmp2);
10655 goto illegal_op;
10656 }
9ef39277 10657 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10658 } else {
9ef39277 10659 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10660 }
2c0262af 10661 } else {
9ee6e8bb 10662 /* Signed. */
10663 if ((op & 1) && shift == 0) {
10664 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10665 tcg_temp_free_i32(tmp);
10666 tcg_temp_free_i32(tmp2);
10667 goto illegal_op;
10668 }
9ef39277 10669 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10670 } else {
9ef39277 10671 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10672 }
2c0262af 10673 }
b75263d6 10674 tcg_temp_free_i32(tmp2);
9ee6e8bb 10675 break;
2c0262af 10676 }
6ddbc6e4 10677 store_reg(s, rd, tmp);
10678 } else {
10679 imm = ((insn & 0x04000000) >> 15)
10680 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10681 if (insn & (1 << 22)) {
10682 /* 16-bit immediate. */
10683 imm |= (insn >> 4) & 0xf000;
10684 if (insn & (1 << 23)) {
10685 /* movt */
5e3f878a 10686 tmp = load_reg(s, rd);
86831435 10687 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10688 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10689 } else {
9ee6e8bb 10690 /* movw */
7d1b0095 10691 tmp = tcg_temp_new_i32();
5e3f878a 10692 tcg_gen_movi_i32(tmp, imm);
2c0262af 10693 }
55203189 10694 store_reg(s, rd, tmp);
2c0262af 10695 } else {
10696 /* Add/sub 12-bit immediate. */
10697 if (rn == 15) {
b0109805 10698 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10699 if (insn & (1 << 23))
b0109805 10700 offset -= imm;
9ee6e8bb 10701 else
b0109805 10702 offset += imm;
7d1b0095 10703 tmp = tcg_temp_new_i32();
5e3f878a 10704 tcg_gen_movi_i32(tmp, offset);
55203189 10705 store_reg(s, rd, tmp);
2c0262af 10706 } else {
5e3f878a 10707 tmp = load_reg(s, rn);
9ee6e8bb 10708 if (insn & (1 << 23))
5e3f878a 10709 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10710 else
5e3f878a 10711 tcg_gen_addi_i32(tmp, tmp, imm);
55203189
PM
10712 if (rn == 13 && rd == 13) {
10713 /* ADD SP, SP, imm or SUB SP, SP, imm */
10714 store_sp_checked(s, tmp);
10715 } else {
10716 store_reg(s, rd, tmp);
10717 }
2c0262af 10718 }
9ee6e8bb 10719 }
191abaa2 10720 }
9ee6e8bb 10721 } else {
55203189
PM
10722 /*
10723 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
10724 * - Data-processing (modified immediate)
10725 */
9ee6e8bb
PB
10726 int shifter_out = 0;
10727 /* modified 12-bit immediate. */
10728 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10729 imm = (insn & 0xff);
10730 switch (shift) {
10731 case 0: /* XY */
10732 /* Nothing to do. */
10733 break;
10734 case 1: /* 00XY00XY */
10735 imm |= imm << 16;
10736 break;
10737 case 2: /* XY00XY00 */
10738 imm |= imm << 16;
10739 imm <<= 8;
10740 break;
10741 case 3: /* XYXYXYXY */
10742 imm |= imm << 16;
10743 imm |= imm << 8;
10744 break;
10745 default: /* Rotated constant. */
10746 shift = (shift << 1) | (imm >> 7);
10747 imm |= 0x80;
10748 imm = imm << (32 - shift);
10749 shifter_out = 1;
10750 break;
b5ff1b31 10751 }
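            /*
             * Worked example (editor's note): with i:imm3 = 0b0101 and
             * imm8 = 0x55 the rotated-constant path computes shift =
             * (0b0101 << 1) | 0 = 10 and imm = 0xd5 << 22 = 0x35400000,
             * i.e. ROR32(0x000000d5, 10) -- matching the ThumbExpandImm()
             * pseudocode in the ARM ARM.
             */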
7d1b0095 10752 tmp2 = tcg_temp_new_i32();
3174f8e9 10753 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10754 rn = (insn >> 16) & 0xf;
3174f8e9 10755 if (rn == 15) {
7d1b0095 10756 tmp = tcg_temp_new_i32();
3174f8e9
FN
10757 tcg_gen_movi_i32(tmp, 0);
10758 } else {
10759 tmp = load_reg(s, rn);
10760 }
9ee6e8bb
PB
10761 op = (insn >> 21) & 0xf;
10762 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10763 shifter_out, tmp, tmp2))
9ee6e8bb 10764 goto illegal_op;
7d1b0095 10765 tcg_temp_free_i32(tmp2);
9ee6e8bb 10766 rd = (insn >> 8) & 0xf;
55203189
PM
10767 if (rd == 13 && rn == 13
10768 && (op == 8 || op == 13)) {
10769 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
10770 store_sp_checked(s, tmp);
10771 } else if (rd != 15) {
3174f8e9
FN
10772 store_reg(s, rd, tmp);
10773 } else {
7d1b0095 10774 tcg_temp_free_i32(tmp);
2c0262af 10775 }
2c0262af 10776 }
9ee6e8bb
PB
10777 }
10778 break;
10779 case 12: /* Load/store single data item. */
10780 {
10781 int postinc = 0;
10782 int writeback = 0;
a99caa48 10783 int memidx;
9bb6558a
PM
10784 ISSInfo issinfo;
10785
9ee6e8bb 10786 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10787 if (disas_neon_ls_insn(s, insn)) {
c1713132 10788 goto illegal_op;
7dcc1f89 10789 }
9ee6e8bb
PB
10790 break;
10791 }
a2fdc890
PM
10792 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10793 if (rs == 15) {
10794 if (!(insn & (1 << 20))) {
10795 goto illegal_op;
10796 }
10797 if (op != 2) {
10798 /* Byte or halfword load space with dest == r15: memory hints.
10799 * Catch them early so we don't emit pointless addressing code.
10800 * This space is a mix of:
10801 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10802 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10803 * cores)
10804 * unallocated hints, which must be treated as NOPs
10805 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10806 * which is easiest for the decoding logic
10807 * Some space which must UNDEF
10808 */
10809 int op1 = (insn >> 23) & 3;
10810 int op2 = (insn >> 6) & 0x3f;
10811 if (op & 2) {
10812 goto illegal_op;
10813 }
10814 if (rn == 15) {
02afbf64
PM
10815 /* UNPREDICTABLE, unallocated hint or
10816 * PLD/PLDW/PLI (literal)
10817 */
2eea841c 10818 return;
a2fdc890
PM
10819 }
10820 if (op1 & 1) {
2eea841c 10821 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10822 }
10823 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 10824 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10825 }
10826 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 10827 goto illegal_op;
a2fdc890
PM
10828 }
10829 }
a99caa48 10830 memidx = get_mem_index(s);
9ee6e8bb 10831 if (rn == 15) {
7d1b0095 10832 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10833 /* PC relative. */
10834 /* s->pc has already been incremented by 4. */
10835 imm = s->pc & 0xfffffffc;
10836 if (insn & (1 << 23))
10837 imm += insn & 0xfff;
10838 else
10839 imm -= insn & 0xfff;
b0109805 10840 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10841 } else {
b0109805 10842 addr = load_reg(s, rn);
9ee6e8bb
PB
10843 if (insn & (1 << 23)) {
10844 /* Positive offset. */
10845 imm = insn & 0xfff;
b0109805 10846 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10847 } else {
9ee6e8bb 10848 imm = insn & 0xff;
2a0308c5
PM
10849 switch ((insn >> 8) & 0xf) {
10850 case 0x0: /* Shifted Register. */
9ee6e8bb 10851 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10852 if (shift > 3) {
10853 tcg_temp_free_i32(addr);
18c9b560 10854 goto illegal_op;
2a0308c5 10855 }
b26eefb6 10856 tmp = load_reg(s, rm);
9ee6e8bb 10857 if (shift)
b26eefb6 10858 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10859 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10860 tcg_temp_free_i32(tmp);
9ee6e8bb 10861 break;
2a0308c5 10862 case 0xc: /* Negative offset. */
b0109805 10863 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10864 break;
2a0308c5 10865 case 0xe: /* User privilege. */
b0109805 10866 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10867 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10868 break;
2a0308c5 10869 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10870 imm = -imm;
10871 /* Fall through. */
2a0308c5 10872 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10873 postinc = 1;
10874 writeback = 1;
10875 break;
2a0308c5 10876 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10877 imm = -imm;
10878 /* Fall through. */
2a0308c5 10879 case 0xf: /* Pre-increment. */
9ee6e8bb
PB
10880 writeback = 1;
10881 break;
10882 default:
2a0308c5 10883 tcg_temp_free_i32(addr);
b7bcbe95 10884 goto illegal_op;
9ee6e8bb
PB
10885 }
10886 }
10887 }
9bb6558a
PM
10888
10889 issinfo = writeback ? ISSInvalid : rs;
10890
0bc003ba
PM
10891 if (s->v8m_stackcheck && rn == 13 && writeback) {
10892 /*
10893 * Stackcheck. Here we know 'addr' is the current SP;
10894 * if imm is +ve we're moving SP up, else down. It is
10895 * UNKNOWN whether the limit check triggers when SP starts
10896 * below the limit and ends up above it; we choose to trigger.
10897 */
10898 if ((int32_t)imm < 0) {
10899 TCGv_i32 newsp = tcg_temp_new_i32();
10900
10901 tcg_gen_addi_i32(newsp, addr, imm);
10902 gen_helper_v8m_stackcheck(cpu_env, newsp);
10903 tcg_temp_free_i32(newsp);
10904 } else {
10905 gen_helper_v8m_stackcheck(cpu_env, addr);
10906 }
10907 }
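            /*
             * Example (editor's note): for a pre-decrement store with
             * writeback such as "STR r0, [sp, #-8]!", imm is negative
             * here, so the limit check runs against the lowered SP value
             * (newsp) before the store is emitted.
             */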
10908
10909 if (writeback && !postinc) {
10910 tcg_gen_addi_i32(addr, addr, imm);
10911 }
10912
9ee6e8bb
PB
10913 if (insn & (1 << 20)) {
10914 /* Load. */
5a839c0d 10915 tmp = tcg_temp_new_i32();
a2fdc890 10916 switch (op) {
5a839c0d 10917 case 0:
9bb6558a 10918 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10919 break;
10920 case 4:
9bb6558a 10921 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10922 break;
10923 case 1:
9bb6558a 10924 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10925 break;
10926 case 5:
9bb6558a 10927 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10928 break;
10929 case 2:
9bb6558a 10930 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10931 break;
2a0308c5 10932 default:
5a839c0d 10933 tcg_temp_free_i32(tmp);
2a0308c5
PM
10934 tcg_temp_free_i32(addr);
10935 goto illegal_op;
a2fdc890
PM
10936 }
10937 if (rs == 15) {
3bb8a96f 10938 gen_bx_excret(s, tmp);
9ee6e8bb 10939 } else {
a2fdc890 10940 store_reg(s, rs, tmp);
9ee6e8bb
PB
10941 }
10942 } else {
10943 /* Store. */
b0109805 10944 tmp = load_reg(s, rs);
9ee6e8bb 10945 switch (op) {
5a839c0d 10946 case 0:
9bb6558a 10947 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10948 break;
10949 case 1:
9bb6558a 10950 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10951 break;
10952 case 2:
9bb6558a 10953 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10954 break;
2a0308c5 10955 default:
5a839c0d 10956 tcg_temp_free_i32(tmp);
2a0308c5
PM
10957 tcg_temp_free_i32(addr);
10958 goto illegal_op;
b7bcbe95 10959 }
5a839c0d 10960 tcg_temp_free_i32(tmp);
2c0262af 10961 }
9ee6e8bb 10962 if (postinc)
b0109805
PB
10963 tcg_gen_addi_i32(addr, addr, imm);
10964 if (writeback) {
10965 store_reg(s, rn, addr);
10966 } else {
7d1b0095 10967 tcg_temp_free_i32(addr);
b0109805 10968 }
9ee6e8bb
PB
10969 }
10970 break;
10971 default:
10972 goto illegal_op;
2c0262af 10973 }
2eea841c 10974 return;
9ee6e8bb 10975illegal_op:
2eea841c
PM
10976 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10977 default_exception_el(s));
2c0262af
FB
10978}
10979
296e5a0a 10980static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 10981{
296e5a0a 10982 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
10983 int32_t offset;
10984 int i;
39d5492a
PM
10985 TCGv_i32 tmp;
10986 TCGv_i32 tmp2;
10987 TCGv_i32 addr;
99c475ab 10988
99c475ab
FB
10989 switch (insn >> 12) {
10990 case 0: case 1:
396e467c 10991
99c475ab
FB
10992 rd = insn & 7;
10993 op = (insn >> 11) & 3;
10994 if (op == 3) {
a2d12f0f
PM
10995 /*
10996 * 0b0001_1xxx_xxxx_xxxx
10997 * - Add, subtract (three low registers)
10998 * - Add, subtract (two low registers and immediate)
10999 */
99c475ab 11000 rn = (insn >> 3) & 7;
396e467c 11001 tmp = load_reg(s, rn);
99c475ab
FB
11002 if (insn & (1 << 10)) {
11003 /* immediate */
7d1b0095 11004 tmp2 = tcg_temp_new_i32();
396e467c 11005 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11006 } else {
11007 /* reg */
11008 rm = (insn >> 6) & 7;
396e467c 11009 tmp2 = load_reg(s, rm);
99c475ab 11010 }
9ee6e8bb
PB
11011 if (insn & (1 << 9)) {
11012 if (s->condexec_mask)
396e467c 11013 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11014 else
72485ec4 11015 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11016 } else {
11017 if (s->condexec_mask)
396e467c 11018 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11019 else
72485ec4 11020 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11021 }
7d1b0095 11022 tcg_temp_free_i32(tmp2);
396e467c 11023 store_reg(s, rd, tmp);
99c475ab
FB
11024 } else {
11025 /* shift immediate */
11026 rm = (insn >> 3) & 7;
11027 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11028 tmp = load_reg(s, rm);
11029 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11030 if (!s->condexec_mask)
11031 gen_logic_CC(tmp);
11032 store_reg(s, rd, tmp);
99c475ab
FB
11033 }
11034 break;
11035 case 2: case 3:
a2d12f0f
PM
11036 /*
11037 * 0b001x_xxxx_xxxx_xxxx
11038 * - Add, subtract, compare, move (one low register and immediate)
11039 */
99c475ab
FB
11040 op = (insn >> 11) & 3;
11041 rd = (insn >> 8) & 0x7;
396e467c 11042 if (op == 0) { /* mov */
7d1b0095 11043 tmp = tcg_temp_new_i32();
396e467c 11044 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11045 if (!s->condexec_mask)
396e467c
FN
11046 gen_logic_CC(tmp);
11047 store_reg(s, rd, tmp);
11048 } else {
11049 tmp = load_reg(s, rd);
7d1b0095 11050 tmp2 = tcg_temp_new_i32();
396e467c
FN
11051 tcg_gen_movi_i32(tmp2, insn & 0xff);
11052 switch (op) {
11053 case 1: /* cmp */
72485ec4 11054 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11055 tcg_temp_free_i32(tmp);
11056 tcg_temp_free_i32(tmp2);
396e467c
FN
11057 break;
11058 case 2: /* add */
11059 if (s->condexec_mask)
11060 tcg_gen_add_i32(tmp, tmp, tmp2);
11061 else
72485ec4 11062 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11063 tcg_temp_free_i32(tmp2);
396e467c
FN
11064 store_reg(s, rd, tmp);
11065 break;
11066 case 3: /* sub */
11067 if (s->condexec_mask)
11068 tcg_gen_sub_i32(tmp, tmp, tmp2);
11069 else
72485ec4 11070 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11071 tcg_temp_free_i32(tmp2);
396e467c
FN
11072 store_reg(s, rd, tmp);
11073 break;
11074 }
99c475ab 11075 }
99c475ab
FB
11076 break;
11077 case 4:
11078 if (insn & (1 << 11)) {
11079 rd = (insn >> 8) & 7;
5899f386
FB
11080 /* load pc-relative. Bit 1 of PC is ignored. */
11081 val = s->pc + 2 + ((insn & 0xff) * 4);
11082 val &= ~(uint32_t)2;
7d1b0095 11083 addr = tcg_temp_new_i32();
b0109805 11084 tcg_gen_movi_i32(addr, val);
c40c8556 11085 tmp = tcg_temp_new_i32();
9bb6558a
PM
11086 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11087 rd | ISSIs16Bit);
7d1b0095 11088 tcg_temp_free_i32(addr);
b0109805 11089 store_reg(s, rd, tmp);
99c475ab
FB
11090 break;
11091 }
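        /*
         * Example for the literal load above (editor's note): for
         * "LDR r2, [pc, #16]" at address 0x8000, s->pc is already
         * 0x8002 here, so val = 0x8002 + 2 + 16 = 0x8014; the ~2
         * masking implements the architectural Align(PC, 4) base.
         */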
11092 if (insn & (1 << 10)) {
ebfe27c5
PM
11093 /* 0b0100_01xx_xxxx_xxxx
11094 * - data processing extended, branch and exchange
11095 */
99c475ab
FB
11096 rd = (insn & 7) | ((insn >> 4) & 8);
11097 rm = (insn >> 3) & 0xf;
11098 op = (insn >> 8) & 3;
11099 switch (op) {
11100 case 0: /* add */
396e467c
FN
11101 tmp = load_reg(s, rd);
11102 tmp2 = load_reg(s, rm);
11103 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11104 tcg_temp_free_i32(tmp2);
55203189
PM
11105 if (rd == 13) {
11106 /* ADD SP, SP, reg */
11107 store_sp_checked(s, tmp);
11108 } else {
11109 store_reg(s, rd, tmp);
11110 }
99c475ab
FB
11111 break;
11112 case 1: /* cmp */
396e467c
FN
11113 tmp = load_reg(s, rd);
11114 tmp2 = load_reg(s, rm);
72485ec4 11115 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11116 tcg_temp_free_i32(tmp2);
11117 tcg_temp_free_i32(tmp);
99c475ab
FB
11118 break;
11119 case 2: /* mov/cpy */
396e467c 11120 tmp = load_reg(s, rm);
55203189
PM
11121 if (rd == 13) {
11122 /* MOV SP, reg */
11123 store_sp_checked(s, tmp);
11124 } else {
11125 store_reg(s, rd, tmp);
11126 }
99c475ab 11127 break;
ebfe27c5
PM
11128 case 3:
11129 {
11130 /* 0b0100_0111_xxxx_xxxx
11131 * - branch [and link] exchange thumb register
11132 */
11133 bool link = insn & (1 << 7);
11134
fb602cb7 11135 if (insn & 3) {
ebfe27c5
PM
11136 goto undef;
11137 }
11138 if (link) {
be5e7a76 11139 ARCH(5);
ebfe27c5 11140 }
fb602cb7
PM
11141 if ((insn & 4)) {
11142 /* BXNS/BLXNS: only exists for v8M with the
11143 * security extensions, and always UNDEF if NonSecure.
11144 * We don't implement these in the user-only mode
11145 * either (in theory you can use them from Secure User
11146 * mode but they are too tied in to system emulation.)
11147 */
11148 if (!s->v8m_secure || IS_USER_ONLY) {
11149 goto undef;
11150 }
11151 if (link) {
3e3fa230 11152 gen_blxns(s, rm);
fb602cb7
PM
11153 } else {
11154 gen_bxns(s, rm);
11155 }
11156 break;
11157 }
11158 /* BLX/BX */
ebfe27c5
PM
11159 tmp = load_reg(s, rm);
11160 if (link) {
99c475ab 11161 val = (uint32_t)s->pc | 1;
7d1b0095 11162 tmp2 = tcg_temp_new_i32();
b0109805
PB
11163 tcg_gen_movi_i32(tmp2, val);
11164 store_reg(s, 14, tmp2);
3bb8a96f
PM
11165 gen_bx(s, tmp);
11166 } else {
11167 /* Only BX works as exception-return, not BLX */
11168 gen_bx_excret(s, tmp);
99c475ab 11169 }
99c475ab
FB
11170 break;
11171 }
ebfe27c5 11172 }
99c475ab
FB
11173 break;
11174 }
11175
a2d12f0f
PM
11176 /*
11177 * 0b0100_00xx_xxxx_xxxx
11178 * - Data-processing (two low registers)
11179 */
99c475ab
FB
11180 rd = insn & 7;
11181 rm = (insn >> 3) & 7;
11182 op = (insn >> 6) & 0xf;
11183 if (op == 2 || op == 3 || op == 4 || op == 7) {
11184 /* the shift/rotate ops want the operands backwards */
11185 val = rm;
11186 rm = rd;
11187 rd = val;
11188 val = 1;
11189 } else {
11190 val = 0;
11191 }
11192
396e467c 11193 if (op == 9) { /* neg */
7d1b0095 11194 tmp = tcg_temp_new_i32();
396e467c
FN
11195 tcg_gen_movi_i32(tmp, 0);
11196 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11197 tmp = load_reg(s, rd);
11198 } else {
f764718d 11199 tmp = NULL;
396e467c 11200 }
99c475ab 11201
396e467c 11202 tmp2 = load_reg(s, rm);
5899f386 11203 switch (op) {
99c475ab 11204 case 0x0: /* and */
396e467c 11205 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11206 if (!s->condexec_mask)
396e467c 11207 gen_logic_CC(tmp);
99c475ab
FB
11208 break;
11209 case 0x1: /* eor */
396e467c 11210 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11211 if (!s->condexec_mask)
396e467c 11212 gen_logic_CC(tmp);
99c475ab
FB
11213 break;
11214 case 0x2: /* lsl */
9ee6e8bb 11215 if (s->condexec_mask) {
365af80e 11216 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11217 } else {
9ef39277 11218 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11219 gen_logic_CC(tmp2);
9ee6e8bb 11220 }
99c475ab
FB
11221 break;
11222 case 0x3: /* lsr */
9ee6e8bb 11223 if (s->condexec_mask) {
365af80e 11224 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11225 } else {
9ef39277 11226 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11227 gen_logic_CC(tmp2);
9ee6e8bb 11228 }
99c475ab
FB
11229 break;
11230 case 0x4: /* asr */
9ee6e8bb 11231 if (s->condexec_mask) {
365af80e 11232 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11233 } else {
9ef39277 11234 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11235 gen_logic_CC(tmp2);
9ee6e8bb 11236 }
99c475ab
FB
11237 break;
11238 case 0x5: /* adc */
49b4c31e 11239 if (s->condexec_mask) {
396e467c 11240 gen_adc(tmp, tmp2);
49b4c31e
RH
11241 } else {
11242 gen_adc_CC(tmp, tmp, tmp2);
11243 }
99c475ab
FB
11244 break;
11245 case 0x6: /* sbc */
2de68a49 11246 if (s->condexec_mask) {
396e467c 11247 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11248 } else {
11249 gen_sbc_CC(tmp, tmp, tmp2);
11250 }
99c475ab
FB
11251 break;
11252 case 0x7: /* ror */
9ee6e8bb 11253 if (s->condexec_mask) {
f669df27
AJ
11254 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11255 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11256 } else {
9ef39277 11257 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11258 gen_logic_CC(tmp2);
9ee6e8bb 11259 }
99c475ab
FB
11260 break;
11261 case 0x8: /* tst */
396e467c
FN
11262 tcg_gen_and_i32(tmp, tmp, tmp2);
11263 gen_logic_CC(tmp);
99c475ab 11264 rd = 16;
5899f386 11265 break;
99c475ab 11266 case 0x9: /* neg */
9ee6e8bb 11267 if (s->condexec_mask)
396e467c 11268 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11269 else
72485ec4 11270 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11271 break;
11272 case 0xa: /* cmp */
72485ec4 11273 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11274 rd = 16;
11275 break;
11276 case 0xb: /* cmn */
72485ec4 11277 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11278 rd = 16;
11279 break;
11280 case 0xc: /* orr */
396e467c 11281 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11282 if (!s->condexec_mask)
396e467c 11283 gen_logic_CC(tmp);
99c475ab
FB
11284 break;
11285 case 0xd: /* mul */
7b2919a0 11286 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11287 if (!s->condexec_mask)
396e467c 11288 gen_logic_CC(tmp);
99c475ab
FB
11289 break;
11290 case 0xe: /* bic */
f669df27 11291 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11292 if (!s->condexec_mask)
396e467c 11293 gen_logic_CC(tmp);
99c475ab
FB
11294 break;
11295 case 0xf: /* mvn */
396e467c 11296 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11297 if (!s->condexec_mask)
396e467c 11298 gen_logic_CC(tmp2);
99c475ab 11299 val = 1;
5899f386 11300 rm = rd;
99c475ab
FB
11301 break;
11302 }
11303 if (rd != 16) {
396e467c
FN
11304 if (val) {
11305 store_reg(s, rm, tmp2);
11306 if (op != 0xf)
7d1b0095 11307 tcg_temp_free_i32(tmp);
396e467c
FN
11308 } else {
11309 store_reg(s, rd, tmp);
7d1b0095 11310 tcg_temp_free_i32(tmp2);
396e467c
FN
11311 }
11312 } else {
7d1b0095
PM
11313 tcg_temp_free_i32(tmp);
11314 tcg_temp_free_i32(tmp2);
99c475ab
FB
11315 }
11316 break;
11317
11318 case 5:
11319 /* load/store register offset. */
11320 rd = insn & 7;
11321 rn = (insn >> 3) & 7;
11322 rm = (insn >> 6) & 7;
11323 op = (insn >> 9) & 7;
b0109805 11324 addr = load_reg(s, rn);
b26eefb6 11325 tmp = load_reg(s, rm);
b0109805 11326 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11327 tcg_temp_free_i32(tmp);
99c475ab 11328
c40c8556 11329 if (op < 3) { /* store */
b0109805 11330 tmp = load_reg(s, rd);
c40c8556
PM
11331 } else {
11332 tmp = tcg_temp_new_i32();
11333 }
99c475ab
FB
11334
11335 switch (op) {
11336 case 0: /* str */
9bb6558a 11337 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11338 break;
11339 case 1: /* strh */
9bb6558a 11340 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11341 break;
11342 case 2: /* strb */
9bb6558a 11343 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11344 break;
11345 case 3: /* ldrsb */
9bb6558a 11346 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11347 break;
11348 case 4: /* ldr */
9bb6558a 11349 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11350 break;
11351 case 5: /* ldrh */
9bb6558a 11352 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11353 break;
11354 case 6: /* ldrb */
9bb6558a 11355 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11356 break;
11357 case 7: /* ldrsh */
9bb6558a 11358 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11359 break;
11360 }
c40c8556 11361 if (op >= 3) { /* load */
b0109805 11362 store_reg(s, rd, tmp);
c40c8556
PM
11363 } else {
11364 tcg_temp_free_i32(tmp);
11365 }
7d1b0095 11366 tcg_temp_free_i32(addr);
99c475ab
FB
11367 break;
11368
11369 case 6:
11370 /* load/store word immediate offset */
11371 rd = insn & 7;
11372 rn = (insn >> 3) & 7;
b0109805 11373 addr = load_reg(s, rn);
99c475ab 11374 val = (insn >> 4) & 0x7c;
b0109805 11375 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11376
11377 if (insn & (1 << 11)) {
11378 /* load */
c40c8556 11379 tmp = tcg_temp_new_i32();
12dcc321 11380 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11381 store_reg(s, rd, tmp);
99c475ab
FB
11382 } else {
11383 /* store */
b0109805 11384 tmp = load_reg(s, rd);
12dcc321 11385 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11386 tcg_temp_free_i32(tmp);
99c475ab 11387 }
7d1b0095 11388 tcg_temp_free_i32(addr);
99c475ab
FB
11389 break;
11390
11391 case 7:
11392 /* load/store byte immediate offset */
11393 rd = insn & 7;
11394 rn = (insn >> 3) & 7;
b0109805 11395 addr = load_reg(s, rn);
99c475ab 11396 val = (insn >> 6) & 0x1f;
b0109805 11397 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11398
11399 if (insn & (1 << 11)) {
11400 /* load */
c40c8556 11401 tmp = tcg_temp_new_i32();
9bb6558a 11402 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11403 store_reg(s, rd, tmp);
99c475ab
FB
11404 } else {
11405 /* store */
b0109805 11406 tmp = load_reg(s, rd);
9bb6558a 11407 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11408 tcg_temp_free_i32(tmp);
99c475ab 11409 }
7d1b0095 11410 tcg_temp_free_i32(addr);
99c475ab
FB
11411 break;
11412
11413 case 8:
11414 /* load/store halfword immediate offset */
11415 rd = insn & 7;
11416 rn = (insn >> 3) & 7;
b0109805 11417 addr = load_reg(s, rn);
99c475ab 11418 val = (insn >> 5) & 0x3e;
b0109805 11419 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11420
11421 if (insn & (1 << 11)) {
11422 /* load */
c40c8556 11423 tmp = tcg_temp_new_i32();
9bb6558a 11424 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11425 store_reg(s, rd, tmp);
99c475ab
FB
11426 } else {
11427 /* store */
b0109805 11428 tmp = load_reg(s, rd);
9bb6558a 11429 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11430 tcg_temp_free_i32(tmp);
99c475ab 11431 }
7d1b0095 11432 tcg_temp_free_i32(addr);
99c475ab
FB
11433 break;
11434
11435 case 9:
11436 /* load/store from stack */
11437 rd = (insn >> 8) & 7;
b0109805 11438 addr = load_reg(s, 13);
99c475ab 11439 val = (insn & 0xff) * 4;
b0109805 11440 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11441
11442 if (insn & (1 << 11)) {
11443 /* load */
c40c8556 11444 tmp = tcg_temp_new_i32();
9bb6558a 11445 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11446 store_reg(s, rd, tmp);
99c475ab
FB
11447 } else {
11448 /* store */
b0109805 11449 tmp = load_reg(s, rd);
9bb6558a 11450 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11451 tcg_temp_free_i32(tmp);
99c475ab 11452 }
7d1b0095 11453 tcg_temp_free_i32(addr);
99c475ab
FB
11454 break;
11455
11456 case 10:
55203189
PM
11457 /*
11458 * 0b1010_xxxx_xxxx_xxxx
11459 * - Add PC/SP (immediate)
11460 */
99c475ab 11461 rd = (insn >> 8) & 7;
5899f386
FB
11462 if (insn & (1 << 11)) {
11463 /* SP */
5e3f878a 11464 tmp = load_reg(s, 13);
5899f386
FB
11465 } else {
11466 /* PC. bit 1 is ignored. */
7d1b0095 11467 tmp = tcg_temp_new_i32();
5e3f878a 11468 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11469 }
99c475ab 11470 val = (insn & 0xff) * 4;
5e3f878a
PB
11471 tcg_gen_addi_i32(tmp, tmp, val);
11472 store_reg(s, rd, tmp);
99c475ab
FB
11473 break;
11474
11475 case 11:
11476 /* misc */
11477 op = (insn >> 8) & 0xf;
11478 switch (op) {
11479 case 0:
55203189
PM
11480 /*
11481 * 0b1011_0000_xxxx_xxxx
11482 * - ADD (SP plus immediate)
11483 * - SUB (SP minus immediate)
11484 */
b26eefb6 11485 tmp = load_reg(s, 13);
99c475ab
FB
11486 val = (insn & 0x7f) * 4;
11487 if (insn & (1 << 7))
6a0d8a1d 11488 val = -(int32_t)val;
b26eefb6 11489 tcg_gen_addi_i32(tmp, tmp, val);
55203189 11490 store_sp_checked(s, tmp);
99c475ab
FB
11491 break;
11492
9ee6e8bb
PB
11493 case 2: /* sign/zero extend. */
11494 ARCH(6);
11495 rd = insn & 7;
11496 rm = (insn >> 3) & 7;
b0109805 11497 tmp = load_reg(s, rm);
9ee6e8bb 11498 switch ((insn >> 6) & 3) {
b0109805
PB
11499 case 0: gen_sxth(tmp); break;
11500 case 1: gen_sxtb(tmp); break;
11501 case 2: gen_uxth(tmp); break;
11502 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11503 }
b0109805 11504 store_reg(s, rd, tmp);
9ee6e8bb 11505 break;
99c475ab 11506 case 4: case 5: case 0xc: case 0xd:
aa369e5c
PM
11507 /*
11508 * 0b1011_x10x_xxxx_xxxx
11509 * - push/pop
11510 */
b0109805 11511 addr = load_reg(s, 13);
5899f386
FB
11512 if (insn & (1 << 8))
11513 offset = 4;
99c475ab 11514 else
5899f386
FB
11515 offset = 0;
11516 for (i = 0; i < 8; i++) {
11517 if (insn & (1 << i))
11518 offset += 4;
11519 }
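            /*
             * Example (editor's note): "PUSH {r0, r2, r7, lr}" sets bit 8
             * plus three low-register bits, so offset = 4 + 3 * 4 = 16 and
             * the pre-decrement below drops SP by 16 before the stores.
             */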
11520 if ((insn & (1 << 11)) == 0) {
b0109805 11521 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11522 }
aa369e5c
PM
11523
11524 if (s->v8m_stackcheck) {
11525 /*
11526 * Here 'addr' is the lower of "old SP" and "new SP";
11527 * if this is a pop that starts below the limit and ends
11528 * above it, it is UNKNOWN whether the limit check triggers;
11529 * we choose to trigger.
11530 */
11531 gen_helper_v8m_stackcheck(cpu_env, addr);
11532 }
11533
99c475ab
FB
11534 for (i = 0; i < 8; i++) {
11535 if (insn & (1 << i)) {
11536 if (insn & (1 << 11)) {
11537 /* pop */
c40c8556 11538 tmp = tcg_temp_new_i32();
12dcc321 11539 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11540 store_reg(s, i, tmp);
99c475ab
FB
11541 } else {
11542 /* push */
b0109805 11543 tmp = load_reg(s, i);
12dcc321 11544 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11545 tcg_temp_free_i32(tmp);
99c475ab 11546 }
5899f386 11547 /* advance to the next address. */
b0109805 11548 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11549 }
11550 }
f764718d 11551 tmp = NULL;
99c475ab
FB
11552 if (insn & (1 << 8)) {
11553 if (insn & (1 << 11)) {
11554 /* pop pc */
c40c8556 11555 tmp = tcg_temp_new_i32();
12dcc321 11556 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11557 /* don't set the pc until the rest of the instruction
11558 has completed */
11559 } else {
11560 /* push lr */
b0109805 11561 tmp = load_reg(s, 14);
12dcc321 11562 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11563 tcg_temp_free_i32(tmp);
99c475ab 11564 }
b0109805 11565 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11566 }
5899f386 11567 if ((insn & (1 << 11)) == 0) {
b0109805 11568 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11569 }
99c475ab 11570 /* write back the new stack pointer */
b0109805 11571 store_reg(s, 13, addr);
99c475ab 11572 /* set the new PC value */
be5e7a76 11573 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11574 store_reg_from_load(s, 15, tmp);
be5e7a76 11575 }
99c475ab
FB
11576 break;
11577
9ee6e8bb
PB
11578 case 1: case 3: case 9: case 11: /* cbz/cbnz */
11579 rm = insn & 7;
d9ba4830 11580 tmp = load_reg(s, rm);
c2d9644e 11581 arm_gen_condlabel(s);
9ee6e8bb 11582 if (insn & (1 << 11))
cb63669a 11583 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11584 else
cb63669a 11585 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11586 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11587 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11588 val = (uint32_t)s->pc + 2;
11589 val += offset;
11590 gen_jmp(s, val);
11591 break;
11592
11593 case 15: /* IT, nop-hint. */
11594 if ((insn & 0xf) == 0) {
11595 gen_nop_hint(s, (insn >> 4) & 0xf);
11596 break;
11597 }
11598 /* If Then. */
11599 s->condexec_cond = (insn >> 4) & 0xe;
11600 s->condexec_mask = insn & 0x1f;
11601 /* No actual code generated for this insn, just setup state. */
11602 break;
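            /*
             * Encoding note (editor's sketch): condexec_cond holds the top
             * three bits of firstcond, and condexec_mask is firstcond[0]
             * followed by the 4-bit mask; each mask bit supplies the low
             * condition bit of one insn in the block. E.g. "ITE EQ"
             * (firstcond = 0b0000, mask = 0b1100) gives condexec_mask =
             * 0b01100.
             */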
11603
06c949e6 11604 case 0xe: /* bkpt */
d4a2dc67
PM
11605 {
11606 int imm8 = extract32(insn, 0, 8);
be5e7a76 11607 ARCH(5);
c900a2e6 11608 gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
06c949e6 11609 break;
d4a2dc67 11610 }
06c949e6 11611
19a6e31c
PM
11612 case 0xa: /* rev, and hlt */
11613 {
11614 int op1 = extract32(insn, 6, 2);
11615
11616 if (op1 == 2) {
11617 /* HLT */
11618 int imm6 = extract32(insn, 0, 6);
11619
11620 gen_hlt(s, imm6);
11621 break;
11622 }
11623
11624 /* Otherwise this is rev */
9ee6e8bb
PB
11625 ARCH(6);
11626 rn = (insn >> 3) & 0x7;
11627 rd = insn & 0x7;
b0109805 11628 tmp = load_reg(s, rn);
19a6e31c 11629 switch (op1) {
66896cb8 11630 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11631 case 1: gen_rev16(tmp); break;
11632 case 3: gen_revsh(tmp); break;
19a6e31c
PM
11633 default:
11634 g_assert_not_reached();
9ee6e8bb 11635 }
b0109805 11636 store_reg(s, rd, tmp);
9ee6e8bb 11637 break;
19a6e31c 11638 }
9ee6e8bb 11639
d9e028c1
PM
11640 case 6:
11641 switch ((insn >> 5) & 7) {
11642 case 2:
11643 /* setend */
11644 ARCH(6);
9886ecdf
PB
11645 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11646 gen_helper_setend(cpu_env);
dcba3a8d 11647 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 11648 }
9ee6e8bb 11649 break;
d9e028c1
PM
11650 case 3:
11651 /* cps */
11652 ARCH(6);
11653 if (IS_USER(s)) {
11654 break;
8984bd2e 11655 }
b53d8923 11656 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11657 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11658 /* FAULTMASK */
11659 if (insn & 1) {
11660 addr = tcg_const_i32(19);
11661 gen_helper_v7m_msr(cpu_env, addr, tmp);
11662 tcg_temp_free_i32(addr);
11663 }
11664 /* PRIMASK */
11665 if (insn & 2) {
11666 addr = tcg_const_i32(16);
11667 gen_helper_v7m_msr(cpu_env, addr, tmp);
11668 tcg_temp_free_i32(addr);
11669 }
11670 tcg_temp_free_i32(tmp);
11671 gen_lookup_tb(s);
11672 } else {
11673 if (insn & (1 << 4)) {
11674 shift = CPSR_A | CPSR_I | CPSR_F;
11675 } else {
11676 shift = 0;
11677 }
11678 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11679 }
d9e028c1
PM
11680 break;
11681 default:
11682 goto undef;
9ee6e8bb
PB
11683 }
11684 break;
11685
99c475ab
FB
11686 default:
11687 goto undef;
11688 }
11689 break;
11690
11691 case 12:
a7d3970d 11692 {
99c475ab 11693 /* load/store multiple */
f764718d 11694 TCGv_i32 loaded_var = NULL;
99c475ab 11695 rn = (insn >> 8) & 0x7;
b0109805 11696 addr = load_reg(s, rn);
99c475ab
FB
11697 for (i = 0; i < 8; i++) {
11698 if (insn & (1 << i)) {
99c475ab
FB
11699 if (insn & (1 << 11)) {
11700 /* load */
c40c8556 11701 tmp = tcg_temp_new_i32();
12dcc321 11702 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11703 if (i == rn) {
11704 loaded_var = tmp;
11705 } else {
11706 store_reg(s, i, tmp);
11707 }
99c475ab
FB
11708 } else {
11709 /* store */
b0109805 11710 tmp = load_reg(s, i);
12dcc321 11711 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11712 tcg_temp_free_i32(tmp);
99c475ab 11713 }
5899f386 11714 /* advance to the next address */
b0109805 11715 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11716 }
11717 }
b0109805 11718 if ((insn & (1 << rn)) == 0) {
a7d3970d 11719 /* base reg not in list: base register writeback */
b0109805
PB
11720 store_reg(s, rn, addr);
11721 } else {
a7d3970d
PM
11722 /* base reg in list: if load, complete it now */
11723 if (insn & (1 << 11)) {
11724 store_reg(s, rn, loaded_var);
11725 }
7d1b0095 11726 tcg_temp_free_i32(addr);
b0109805 11727 }
99c475ab 11728 break;
a7d3970d 11729 }
99c475ab
FB
11730 case 13:
11731 /* conditional branch or swi */
11732 cond = (insn >> 8) & 0xf;
11733 if (cond == 0xe)
11734 goto undef;
11735
11736 if (cond == 0xf) {
11737 /* swi */
eaed129d 11738 gen_set_pc_im(s, s->pc);
d4a2dc67 11739 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 11740 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
11741 break;
11742 }
11743 /* generate a conditional jump to next instruction */
c2d9644e 11744 arm_skip_unless(s, cond);
99c475ab
FB
11745
11746 /* jump to the offset */
5899f386 11747 val = (uint32_t)s->pc + 2;
99c475ab 11748 offset = ((int32_t)insn << 24) >> 24;
5899f386 11749 val += offset << 1;
8aaca4c0 11750 gen_jmp(s, val);
99c475ab
FB
11751 break;
11752
11753 case 14:
358bf29e 11754 if (insn & (1 << 11)) {
296e5a0a
PM
11755 /* thumb_insn_is_16bit() ensures we can't get here for
11756 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
11757 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
11758 */
11759 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11760 ARCH(5);
11761 offset = ((insn & 0x7ff) << 1);
11762 tmp = load_reg(s, 14);
11763 tcg_gen_addi_i32(tmp, tmp, offset);
11764 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
11765
11766 tmp2 = tcg_temp_new_i32();
11767 tcg_gen_movi_i32(tmp2, s->pc | 1);
11768 store_reg(s, 14, tmp2);
11769 gen_bx(s, tmp);
358bf29e
PB
11770 break;
11771 }
9ee6e8bb 11772 /* unconditional branch */
99c475ab
FB
11773 val = (uint32_t)s->pc;
11774 offset = ((int32_t)insn << 21) >> 21;
11775 val += (offset << 1) + 2;
8aaca4c0 11776 gen_jmp(s, val);
99c475ab
FB
11777 break;
11778
11779 case 15:
296e5a0a
PM
11780 /* thumb_insn_is_16bit() ensures we can't get here for
11781 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
11782 */
11783 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11784
11785 if (insn & (1 << 11)) {
11786 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
11787 offset = ((insn & 0x7ff) << 1) | 1;
11788 tmp = load_reg(s, 14);
11789 tcg_gen_addi_i32(tmp, tmp, offset);
11790
11791 tmp2 = tcg_temp_new_i32();
11792 tcg_gen_movi_i32(tmp2, s->pc | 1);
11793 store_reg(s, 14, tmp2);
11794 gen_bx(s, tmp);
11795 } else {
11796 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
11797 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
11798
11799 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
11800 }
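        /*
         * Worked example (editor's note): a split Thumb-1 BL at
         * 0x8000/0x8002 with imm11 = 0 in the prefix and imm11 = 8 in
         * the suffix first sets LR = 0x8002 + 2 + 0 = 0x8004, then the
         * suffix branches to (0x8004 + ((8 << 1) | 1)) & ~1 = 0x8014 in
         * Thumb state, leaving LR = 0x8004 | 1 = 0x8005.
         */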
9ee6e8bb 11801 break;
99c475ab
FB
11802 }
11803 return;
9ee6e8bb 11804illegal_op:
99c475ab 11805undef:
73710361
GB
11806 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11807 default_exception_el(s));
99c475ab
FB
11808}
11809
541ebcd4
PM
11810static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11811{
11812 /* Return true if the insn at dc->pc might cross a page boundary.
11813 * (False positives are OK, false negatives are not.)
5b8d7289
PM
11814 * We know this is a Thumb insn, and our caller ensures we are
11815 * only called if dc->pc is less than 4 bytes from the page
11816 * boundary, so we cross the page if the first 16 bits indicate
11817 * that this is a 32-bit insn.
541ebcd4 11818 */
5b8d7289 11819 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4 11820
5b8d7289 11821 return !thumb_insn_is_16bit(s, insn);
541ebcd4
PM
11822}
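
/*
 * Editor's sketch (assumption: this mirrors the thumb_insn_is_16bit()
 * helper used above, defined elsewhere in this file): a halfword whose
 * top five bits are 0b11101, 0b11110 or 0b11111 begins a 32-bit Thumb
 * insn, with the Thumb-1 split BL/BLX pairs treated as 16-bit halves:
 *
 *     static bool is_32bit_first_half(uint16_t hw)
 *     {
 *         return (hw >> 11) >= 0x1d;    // 0b11101, 0b11110, 0b11111
 *     }
 */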
11823
b542683d 11824static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2c0262af 11825{
1d8a5535 11826 DisasContext *dc = container_of(dcbase, DisasContext, base);
9c489ea6 11827 CPUARMState *env = cs->env_ptr;
2fc0cc0e 11828 ARMCPU *cpu = env_archcpu(env);
aad821ac
RH
11829 uint32_t tb_flags = dc->base.tb->flags;
11830 uint32_t condexec, core_mmu_idx;
3b46e624 11831
962fcbf2 11832 dc->isar = &cpu->isar;
dcba3a8d 11833 dc->pc = dc->base.pc_first;
e50e6a20 11834 dc->condjmp = 0;
3926cc84 11835
40f860cd 11836 dc->aarch64 = 0;
cef9ee70
SS
11837 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11838 * there is no secure EL1, so we route exceptions to EL3.
11839 */
11840 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11841 !arm_el_is_aa64(env, 3);
aad821ac
RH
11842 dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
11843 dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
11844 dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
11845 condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
11846 dc->condexec_mask = (condexec & 0xf) << 1;
11847 dc->condexec_cond = condexec >> 4;
11848 core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
11849 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
c1e37810 11850 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 11851#if !defined(CONFIG_USER_ONLY)
c1e37810 11852 dc->user = (dc->current_el == 0);
3926cc84 11853#endif
aad821ac
RH
11854 dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
11855 dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
11856 dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
11857 dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
ea7ac69d
PM
11858 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
11859 dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
11860 dc->vec_stride = 0;
11861 } else {
11862 dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
11863 dc->c15_cpar = 0;
11864 }
aad821ac 11865 dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
fb602cb7
PM
11866 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
11867 regime_is_secure(env, dc->mmu_idx);
aad821ac 11868 dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
6d60c67a 11869 dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
6000531e
PM
11870 dc->v7m_new_fp_ctxt_needed =
11871 FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
e33cf0f8 11872 dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
60322b39 11873 dc->cp_regs = cpu->cp_regs;
a984e42c 11874 dc->features = env->features;
40f860cd 11875
50225ad0
PM
11876 /* Single step state. The code-generation logic here is:
11877 * SS_ACTIVE == 0:
11878 * generate code with no special handling for single-stepping (except
11879 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11880 * this happens anyway because those changes are all system register or
11881 * PSTATE writes).
11882 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11883 * emit code for one insn
11884 * emit code to clear PSTATE.SS
11885 * emit code to generate software step exception for completed step
11886 * end TB (as usual for having generated an exception)
11887 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11888 * emit code to generate a software step exception
11889 * end the TB
11890 */
aad821ac
RH
11891 dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
11892 dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
50225ad0
PM
11893 dc->is_ldex = false;
11894 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11895
bfe7ad5b 11896 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
1d8a5535 11897
f7708456
RH
11898 /* If architectural single step active, limit to 1. */
11899 if (is_singlestepping(dc)) {
b542683d 11900 dc->base.max_insns = 1;
f7708456
RH
11901 }
11902
d0264d86
RH
11903 /* ARM is a fixed-length ISA. Bound the number of insns to execute
11904 to those left on the page. */
11905 if (!dc->thumb) {
bfe7ad5b 11906 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
b542683d 11907 dc->base.max_insns = MIN(dc->base.max_insns, bound);
d0264d86
RH
11908 }
11909
d9eea52c
PM
11910 cpu_V0 = tcg_temp_new_i64();
11911 cpu_V1 = tcg_temp_new_i64();
e677137d 11912 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 11913 cpu_M0 = tcg_temp_new_i64();
1d8a5535
LV
11914}
11915
b1476854
LV
11916static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
11917{
11918 DisasContext *dc = container_of(dcbase, DisasContext, base);
11919
11920 /* A note on handling of the condexec (IT) bits:
11921 *
11922 * We want to avoid the overhead of having to write the updated condexec
11923 * bits back to the CPUARMState for every instruction in an IT block. So:
11924 * (1) if the condexec bits are not already zero then we write
11925 * zero back into the CPUARMState now. This avoids complications trying
11926 * to do it at the end of the block. (For example if we don't do this
11927 * it's hard to identify whether we can safely skip writing condexec
11928 * at the end of the TB, which we definitely want to do for the case
11929 * where a TB doesn't do anything with the IT state at all.)
11930 * (2) if we are going to leave the TB then we call gen_set_condexec()
11931 * which will write the correct value into CPUARMState if zero is wrong.
11932 * This is done both for leaving the TB at the end, and for leaving
11933 * it because of an exception we know will happen, which is done in
11934 * gen_exception_insn(). The latter is necessary because we need to
11935 * leave the TB with the PC/IT state just prior to execution of the
11936 * instruction which caused the exception.
11937 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11938 * then the CPUARMState will be wrong and we need to reset it.
11939 * This is handled in the same way as restoration of the
11940 * PC in these situations; we save the value of the condexec bits
11941 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11942 * then uses this to restore them after an exception.
11943 *
11944 * Note that there are no instructions which can read the condexec
11945 * bits, and none which can write non-static values to them, so
11946 * we don't need to care about whether CPUARMState is correct in the
11947 * middle of a TB.
11948 */
11949
11950 /* Reset the conditional execution bits immediately. This avoids
11951 complications trying to do it at the end of the block. */
11952 if (dc->condexec_mask || dc->condexec_cond) {
11953 TCGv_i32 tmp = tcg_temp_new_i32();
11954 tcg_gen_movi_i32(tmp, 0);
11955 store_cpu_field(tmp, condexec_bits);
11956 }
11957}
11958
f62bd897
LV
11959static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
11960{
11961 DisasContext *dc = container_of(dcbase, DisasContext, base);
11962
f62bd897
LV
11963 tcg_gen_insn_start(dc->pc,
11964 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
11965 0);
15fa08f8 11966 dc->insn_start = tcg_last_op();
f62bd897
LV
11967}
11968
a68956ad
LV
11969static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
11970 const CPUBreakpoint *bp)
11971{
11972 DisasContext *dc = container_of(dcbase, DisasContext, base);
11973
11974 if (bp->flags & BP_CPU) {
11975 gen_set_condexec(dc);
11976 gen_set_pc_im(dc, dc->pc);
11977 gen_helper_check_breakpoints(cpu_env);
11978 /* End the TB early; it's likely not going to be executed */
11979 dc->base.is_jmp = DISAS_TOO_MANY;
11980 } else {
11981 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
11982 /* The address covered by the breakpoint must be
11983 included in [tb->pc, tb->pc + tb->size) in order
11984 for it to be properly cleared -- thus we
11985 increment the PC here so that the logic setting
11986 tb->size below does the right thing. */
11987 /* TODO: Advance PC by correct instruction length to
11988 * avoid disassembler error messages */
11989 dc->pc += 2;
11990 dc->base.is_jmp = DISAS_NORETURN;
11991 }
11992
11993 return true;
11994}
11995
722ef0a5 11996static bool arm_pre_translate_insn(DisasContext *dc)
13189a90 11997{
13189a90
LV
11998#ifdef CONFIG_USER_ONLY
11999 /* Intercept jump to the magic kernel page. */
12000 if (dc->pc >= 0xffff0000) {
12001 /* We always get here via a jump, so know we are not in a
12002 conditional execution block. */
12003 gen_exception_internal(EXCP_KERNEL_TRAP);
12004 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 12005 return true;
13189a90
LV
12006 }
12007#endif
12008
12009 if (dc->ss_active && !dc->pstate_ss) {
12010 /* Singlestep state is Active-pending.
12011 * If we're in this state at the start of a TB then either
12012 * a) we just took an exception to an EL which is being debugged
12013 * and this is the first insn in the exception handler
12014 * b) debug exceptions were masked and we just unmasked them
12015 * without changing EL (eg by clearing PSTATE.D)
12016 * In either case we're going to take a swstep exception in the
12017 * "did not step an insn" case, and so the syndrome ISV and EX
12018 * bits should be zero.
12019 */
12020 assert(dc->base.num_insns == 1);
12021 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
12022 default_exception_el(dc));
12023 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 12024 return true;
13189a90
LV
12025 }
12026
722ef0a5
RH
12027 return false;
12028}
13189a90 12029
d0264d86 12030static void arm_post_translate_insn(DisasContext *dc)
722ef0a5 12031{
13189a90
LV
12032 if (dc->condjmp && !dc->base.is_jmp) {
12033 gen_set_label(dc->condlabel);
12034 dc->condjmp = 0;
12035 }
13189a90 12036 dc->base.pc_next = dc->pc;
23169224 12037 translator_loop_temp_check(&dc->base);
13189a90
LV
12038}
12039
722ef0a5
RH
12040static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12041{
12042 DisasContext *dc = container_of(dcbase, DisasContext, base);
12043 CPUARMState *env = cpu->env_ptr;
12044 unsigned int insn;
12045
12046 if (arm_pre_translate_insn(dc)) {
12047 return;
12048 }
12049
12050 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
58803318 12051 dc->insn = insn;
722ef0a5
RH
12052 dc->pc += 4;
12053 disas_arm_insn(dc, insn);
12054
d0264d86
RH
12055 arm_post_translate_insn(dc);
12056
12057 /* ARM is a fixed-length ISA. We performed the cross-page check
12058 in init_disas_context by adjusting max_insns. */
722ef0a5
RH
12059}
12060
dcf14dfb
PM
12061static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12062{
12063 /* Return true if this Thumb insn is always unconditional,
12064 * even inside an IT block. This is true of only a very few
12065 * instructions: BKPT, HLT, and SG.
12066 *
12067 * A larger class of instructions are UNPREDICTABLE if used
12068 * inside an IT block; we do not need to detect those here, because
12069 * what we do by default (perform the cc check and update the IT
12070 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12071 * choice for those situations.
12072 *
12073 * insn is either a 16-bit or a 32-bit instruction; the two are
12074 * distinguishable because for the 16-bit case the top 16 bits
12075 * are zeroes, and that isn't a valid 32-bit encoding.
12076 */
12077 if ((insn & 0xffffff00) == 0xbe00) {
12078 /* BKPT */
12079 return true;
12080 }
12081
12082 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12083 !arm_dc_feature(s, ARM_FEATURE_M)) {
12084 /* HLT: v8A only. This is unconditional even when it is going to
12085 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12086 * For v7 cores this was a plain old undefined encoding and so
12087 * honours its cc check. (We might be using the encoding as
12088 * a semihosting trap, but we don't change the cc check behaviour
12089 * on that account, because a debugger connected to a real v7A
12090 * core and emulating semihosting traps by catching the UNDEF
12091 * exception would also only see cases where the cc check passed.
12092 * No guest code should be trying to do a HLT semihosting trap
12093 * in an IT block anyway.
12094 */
12095 return true;
12096 }
12097
12098 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12099 arm_dc_feature(s, ARM_FEATURE_M)) {
12100 /* SG: v8M only */
12101 return true;
12102 }
12103
12104 return false;
12105}
12106
722ef0a5
RH
12107static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12108{
12109 DisasContext *dc = container_of(dcbase, DisasContext, base);
12110 CPUARMState *env = cpu->env_ptr;
296e5a0a
PM
12111 uint32_t insn;
12112 bool is_16bit;
722ef0a5
RH
12113
12114 if (arm_pre_translate_insn(dc)) {
12115 return;
12116 }
12117
296e5a0a
PM
12118 insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
12119 is_16bit = thumb_insn_is_16bit(dc, insn);
12120 dc->pc += 2;
12121 if (!is_16bit) {
12122 uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
12123
12124 insn = insn << 16 | insn2;
12125 dc->pc += 2;
12126 }
58803318 12127 dc->insn = insn;
296e5a0a 12128
dcf14dfb 12129 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
296e5a0a
PM
12130 uint32_t cond = dc->condexec_cond;
12131
12132 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
c2d9644e 12133 arm_skip_unless(dc, cond);
296e5a0a
PM
12134 }
12135 }
12136
12137 if (is_16bit) {
12138 disas_thumb_insn(dc, insn);
12139 } else {
2eea841c 12140 disas_thumb2_insn(dc, insn);
296e5a0a 12141 }
722ef0a5
RH
12142
12143 /* Advance the Thumb condexec condition. */
12144 if (dc->condexec_mask) {
12145 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
12146 ((dc->condexec_mask >> 4) & 1));
12147 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
12148 if (dc->condexec_mask == 0) {
12149 dc->condexec_cond = 0;
12150 }
12151 }
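    /*
     * Trace (editor's note), continuing an "ITE EQ" block: after the IT
     * insn itself this advance leaves condexec_cond = 0b0000 and
     * condexec_mask = 0b11000, so insn 1 runs as EQ; the next advance
     * shifts in the inverted bit, giving condexec_cond = 0b0001 (NE) for
     * insn 2, after which the mask becomes 0 and the block ends.
     */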
12152
d0264d86
RH
12153 arm_post_translate_insn(dc);
12154
12155 /* Thumb is a variable-length ISA. Stop translation when the next insn
12156 * will touch a new page. This ensures that prefetch aborts occur at
12157 * the right place.
12158 *
12159 * We want to stop the TB if the next insn starts in a new page,
12160 * or if it spans between this page and the next. This means that
12161 * if we're looking at the last halfword in the page we need to
12162 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12163 * or a 32-bit Thumb insn (which won't).
12164 * This is to avoid generating a silly TB with a single 16-bit insn
12165 * in it at the end of this page (which would execute correctly
12166 * but isn't very efficient).
12167 */
12168 if (dc->base.is_jmp == DISAS_NEXT
bfe7ad5b
EC
12169 && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
12170 || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
d0264d86
RH
12171 && insn_crosses_page(env, dc)))) {
12172 dc->base.is_jmp = DISAS_TOO_MANY;
12173 }
722ef0a5
RH
12174}
12175
70d3c035 12176static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
1d8a5535 12177{
70d3c035 12178 DisasContext *dc = container_of(dcbase, DisasContext, base);
2e70f6ef 12179
c5a49c63 12180 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
70d3c035
LV
12181 /* FIXME: This can theoretically happen with self-modifying code. */
12182 cpu_abort(cpu, "IO on conditional branch instruction");
2e70f6ef 12183 }
9ee6e8bb 12184
b5ff1b31 12185 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
12186 instruction was a conditional branch or trap, and the PC has
12187 already been written. */
f021b2c4 12188 gen_set_condexec(dc);
dcba3a8d 12189 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
3bb8a96f
PM
12190 /* Exception return branches need some special case code at the
12191 * end of the TB, which is complex enough that it has to
12192 * handle the single-step vs not and the condition-failed
12193 * insn codepath itself.
12194 */
12195 gen_bx_excret_final_code(dc);
12196 } else if (unlikely(is_singlestepping(dc))) {
7999a5c8 12197 /* Unconditional and "condition passed" instruction codepath. */
dcba3a8d 12198 switch (dc->base.is_jmp) {
7999a5c8 12199 case DISAS_SWI:
50225ad0 12200 gen_ss_advance(dc);
73710361
GB
12201 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12202 default_exception_el(dc));
7999a5c8
SF
12203 break;
12204 case DISAS_HVC:
37e6456e 12205 gen_ss_advance(dc);
73710361 12206 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
7999a5c8
SF
12207 break;
12208 case DISAS_SMC:
37e6456e 12209 gen_ss_advance(dc);
73710361 12210 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
7999a5c8
SF
12211 break;
12212 case DISAS_NEXT:
a68956ad 12213 case DISAS_TOO_MANY:
7999a5c8
SF
12214 case DISAS_UPDATE:
12215 gen_set_pc_im(dc, dc->pc);
12216 /* fall through */
12217 default:
5425415e
PM
12218 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12219 gen_singlestep_exception(dc);
a0c231e6
RH
12220 break;
12221 case DISAS_NORETURN:
12222 break;
7999a5c8 12223 }
8aaca4c0 12224 } else {
9ee6e8bb
PB
12225 /* While branches must always occur at the end of an IT block,
12226 there are a few other things that can cause us to terminate
65626741 12227 the TB in the middle of an IT block:
9ee6e8bb
PB
12228 - Exception generating instructions (bkpt, swi, undefined).
12229 - Page boundaries.
12230 - Hardware watchpoints.
12231 Hardware breakpoints have already been handled and so skip this code.
12232 */
dcba3a8d 12233 switch(dc->base.is_jmp) {
8aaca4c0 12234 case DISAS_NEXT:
a68956ad 12235 case DISAS_TOO_MANY:
6e256c93 12236 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0 12237 break;
577bf808 12238 case DISAS_JUMP:
8a6b28c7
EC
12239 gen_goto_ptr();
12240 break;
e8d52302
AB
12241 case DISAS_UPDATE:
12242 gen_set_pc_im(dc, dc->pc);
12243 /* fall through */
577bf808 12244 default:
8aaca4c0 12245 /* indicate that the hash table must be used to find the next TB */
07ea28b4 12246 tcg_gen_exit_tb(NULL, 0);
8aaca4c0 12247 break;
a0c231e6 12248 case DISAS_NORETURN:
8aaca4c0
FB
12249 /* nothing more to generate */
12250 break;
9ee6e8bb 12251 case DISAS_WFI:
58803318
SS
12252 {
12253 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
12254 !(dc->insn & (1U << 31))) ? 2 : 4);
12255
12256 gen_helper_wfi(cpu_env, tmp);
12257 tcg_temp_free_i32(tmp);
84549b6d
PM
12258 /* The helper doesn't necessarily throw an exception, but we
12259 * must go back to the main loop to check for interrupts anyway.
12260 */
07ea28b4 12261 tcg_gen_exit_tb(NULL, 0);
9ee6e8bb 12262 break;
58803318 12263 }
72c1d3af
PM
12264 case DISAS_WFE:
12265 gen_helper_wfe(cpu_env);
12266 break;
c87e5a61
PM
12267 case DISAS_YIELD:
12268 gen_helper_yield(cpu_env);
12269 break;
9ee6e8bb 12270 case DISAS_SWI:
73710361
GB
12271 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12272 default_exception_el(dc));
9ee6e8bb 12273 break;
37e6456e 12274 case DISAS_HVC:
73710361 12275 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
12276 break;
12277 case DISAS_SMC:
73710361 12278 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
37e6456e 12279 break;
8aaca4c0 12280 }
f021b2c4
PM
12281 }
12282
12283 if (dc->condjmp) {
12284 /* "Condition failed" instruction codepath for the branch/trap insn */
12285 gen_set_label(dc->condlabel);
12286 gen_set_condexec(dc);
b636649f 12287 if (unlikely(is_singlestepping(dc))) {
f021b2c4
PM
12288 gen_set_pc_im(dc, dc->pc);
12289 gen_singlestep_exception(dc);
12290 } else {
6e256c93 12291 gen_goto_tb(dc, 1, dc->pc);
e50e6a20 12292 }
2c0262af 12293 }
23169224
LV
12294
12295 /* Functions above can change dc->pc, so re-align db->pc_next */
12296 dc->base.pc_next = dc->pc;
70d3c035
LV
12297}
12298
4013f7fc
LV
12299static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12300{
12301 DisasContext *dc = container_of(dcbase, DisasContext, base);
12302
12303 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
1d48474d 12304 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
4013f7fc
LV
12305}
12306
23169224
LV
12307static const TranslatorOps arm_translator_ops = {
12308 .init_disas_context = arm_tr_init_disas_context,
12309 .tb_start = arm_tr_tb_start,
12310 .insn_start = arm_tr_insn_start,
12311 .breakpoint_check = arm_tr_breakpoint_check,
12312 .translate_insn = arm_tr_translate_insn,
12313 .tb_stop = arm_tr_tb_stop,
12314 .disas_log = arm_tr_disas_log,
12315};
12316
722ef0a5
RH
12317static const TranslatorOps thumb_translator_ops = {
12318 .init_disas_context = arm_tr_init_disas_context,
12319 .tb_start = arm_tr_tb_start,
12320 .insn_start = arm_tr_insn_start,
12321 .breakpoint_check = arm_tr_breakpoint_check,
12322 .translate_insn = thumb_tr_translate_insn,
12323 .tb_stop = arm_tr_tb_stop,
12324 .disas_log = arm_tr_disas_log,
12325};
12326
70d3c035 12327/* generate intermediate code for basic block 'tb'. */
8b86d6d2 12328void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
70d3c035 12329{
23169224
LV
12330 DisasContext dc;
12331 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 12332
aad821ac 12333 if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
722ef0a5
RH
12334 ops = &thumb_translator_ops;
12335 }
23169224 12336#ifdef TARGET_AARCH64
aad821ac 12337 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
23169224 12338 ops = &aarch64_translator_ops;
2c0262af
FB
12339 }
12340#endif
23169224 12341
8b86d6d2 12342 translator_loop(ops, &dc.base, cpu, tb, max_insns);
2c0262af
FB
12343}
12344
90c84c56 12345void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
2c0262af 12346{
878096ee
AF
12347 ARMCPU *cpu = ARM_CPU(cs);
12348 CPUARMState *env = &cpu->env;
2c0262af
FB
12349 int i;
12350
17731115 12351 if (is_a64(env)) {
90c84c56 12352 aarch64_cpu_dump_state(cs, f, flags);
17731115
PM
12353 return;
12354 }
12355
2c0262af 12356 for(i=0;i<16;i++) {
90c84c56 12357 qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 12358 if ((i % 4) == 3)
90c84c56 12359 qemu_fprintf(f, "\n");
2c0262af 12360 else
90c84c56 12361 qemu_fprintf(f, " ");
2c0262af 12362 }
06e5cf7a 12363
5b906f35
PM
12364 if (arm_feature(env, ARM_FEATURE_M)) {
12365 uint32_t xpsr = xpsr_read(env);
12366 const char *mode;
1e577cc7
PM
12367 const char *ns_status = "";
12368
12369 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
12370 ns_status = env->v7m.secure ? "S " : "NS ";
12371 }
5b906f35
PM
12372
12373 if (xpsr & XPSR_EXCP) {
12374 mode = "handler";
12375 } else {
8bfc26ea 12376 if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
5b906f35
PM
12377 mode = "unpriv-thread";
12378 } else {
12379 mode = "priv-thread";
12380 }
12381 }
12382
90c84c56
MA
12383 qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
12384 xpsr,
12385 xpsr & XPSR_N ? 'N' : '-',
12386 xpsr & XPSR_Z ? 'Z' : '-',
12387 xpsr & XPSR_C ? 'C' : '-',
12388 xpsr & XPSR_V ? 'V' : '-',
12389 xpsr & XPSR_T ? 'T' : 'A',
12390 ns_status,
12391 mode);
06e5cf7a 12392 } else {
5b906f35
PM
12393 uint32_t psr = cpsr_read(env);
12394 const char *ns_status = "";
12395
12396 if (arm_feature(env, ARM_FEATURE_EL3) &&
12397 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12398 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12399 }
12400
90c84c56
MA
12401 qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
12402 psr,
12403 psr & CPSR_N ? 'N' : '-',
12404 psr & CPSR_Z ? 'Z' : '-',
12405 psr & CPSR_C ? 'C' : '-',
12406 psr & CPSR_V ? 'V' : '-',
12407 psr & CPSR_T ? 'T' : 'A',
12408 ns_status,
12409 aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
5b906f35 12410 }
b7bcbe95 12411
f2617cfc
PM
12412 if (flags & CPU_DUMP_FPU) {
12413 int numvfpregs = 0;
12414 if (arm_feature(env, ARM_FEATURE_VFP)) {
12415 numvfpregs += 16;
12416 }
12417 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12418 numvfpregs += 16;
12419 }
12420 for (i = 0; i < numvfpregs; i++) {
9a2b5256 12421 uint64_t v = *aa32_vfp_dreg(env, i);
90c84c56
MA
12422 qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12423 i * 2, (uint32_t)v,
12424 i * 2 + 1, (uint32_t)(v >> 32),
12425 i, v);
f2617cfc 12426 }
90c84c56 12427 qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
b7bcbe95 12428 }
2c0262af 12429}
a6b025d3 12430
bad729e2
RH
12431void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12432 target_ulong *data)
d2856f1a 12433{
3926cc84 12434 if (is_a64(env)) {
bad729e2 12435 env->pc = data[0];
40f860cd 12436 env->condexec_bits = 0;
aaa1f954 12437 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12438 } else {
bad729e2
RH
12439 env->regs[15] = data[0];
12440 env->condexec_bits = data[1];
aaa1f954 12441 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12442 }
d2856f1a 12443}
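
/*
 * Editor's note: the data[] slots consumed here are the ones emitted by
 * arm_tr_insn_start() above -- data[0] is the PC, data[1] the packed
 * condexec (IT) bits, and data[2] the insn-start syndrome word, which is
 * stored right-shifted and hence shifted back by
 * ARM_INSN_START_WORD2_SHIFT on restore.
 */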