]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
target/arm: Allow ARMCPRegInfo read/write functions to throw exceptions
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
36a71934 28#include "tcg-op-gvec.h"
1de7afc9 29#include "qemu/log.h"
534df156 30#include "qemu/bitops.h"
1d854765 31#include "arm_ldst.h"
f1672e6f 32#include "hw/semihosting/semihost.h"
1497c961 33
2ef6175a
RH
34#include "exec/helper-proto.h"
35#include "exec/helper-gen.h"
2c0262af 36
a7e30d84 37#include "trace-tcg.h"
508127e2 38#include "exec/log.h"
a7e30d84
LV
39
40
/*
 * Architecture-feature test macros: each expands to a check against the
 * DisasContext 's' in scope at the point of use, so they can only be used
 * inside translation functions that declare 'DisasContext *s'.
 */
#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

/*
 * Bail out of the current decode as an illegal opcode if the required
 * architecture level is missing; relies on an 'illegal_op' label in the
 * enclosing function.
 */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
/* Condition flags, stored one per TCG global for cheap access. */
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
/* Monitor state for exclusive (LDREX/STREX-style) accesses. */
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
/* Function prototypes for gen_ functions for fix point conversions */
typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
61adacc8 80
b26eefb6
PB
/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    /* Map each AArch32 general-purpose register onto its CPUARMState slot. */
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    /* Condition flags live in dedicated CPUARMState fields. */
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    /* Exclusive-monitor state used by load/store-exclusive emulation. */
    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    /* Also set up the AArch64 translator's globals. */
    a64_translate_init();
}
103
9bb6558a
PM
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;      /* access size, from the memop */
    bool sse = memop & MO_SIGN;     /* sign-extended load */
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask; /* source/target register number */

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
146
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        /* Stage-2 (and anything unlisted) should never reach here. */
        g_assert_not_reached();
    }
}
180
/*
 * Load a 32-bit value at the given byte offset within CPUARMState into a
 * fresh temporary.  The caller owns (and must eventually free) the temp.
 */
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

/* Convenience wrapper: load a named CPUARMState field. */
#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

/*
 * Store 'var' at the given byte offset within CPUARMState.
 * 'var' is marked dead: this call frees it.
 */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

/* Convenience wrapper: store 'var' to a named CPUARMState field; frees var. */
#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 198
fdbcf632
RH
199/* The architectural value of PC. */
200static uint32_t read_pc(DisasContext *s)
201{
202 return s->pc_curr + (s->thumb ? 4 : 8);
203}
204
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        /* PC reads return the architectural (pipeline-offset) value. */
        tcg_gen_movi_i32(var, read_pc(s));
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.
 * The caller owns (and must eventually free) the returned temporary.
 */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
222
/*
 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
 * This is used for load/store for which use of PC implies (literal),
 * or ADD that implies ADR.
 */
static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    if (reg == 15) {
        /* Literal addressing: base is the word-aligned architectural PC. */
        tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
    } else {
        tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
    }
    return tmp;
}
239
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        /* Writing the PC ends the TB: force a jump out. */
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
256
/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        /* Helper raises the exception itself if 'var' is below the limit. */
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}
273
/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* Dual halfword extensions are done via helpers. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


/* Write 'var' to CPSR under control of 'mask' (only masked bits change). */
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
292
/* Raise a QEMU-internal exception (EXCP_DEBUG etc.); must not be used
 * for architectural exceptions, as the assert enforces.
 */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}
301
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    /* The exception ends the TB; nothing after it is reachable. */
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}
330
b636649f
PM
331static inline bool is_singlestepping(DisasContext *s)
332{
333 /* Return true if we are singlestepping either because of
334 * architectural singlestep or QEMU gdbstub singlestep. This does
335 * not include the command line '-singlestep' mode which is rather
336 * misnamed as it only means "one instruction per TB" and doesn't
337 * affect the code we generate.
338 */
dcba3a8d 339 return s->base.singlestep_enabled || s->ss_active;
b636649f
PM
340}
341
/* Dual signed 16x16->32 multiply: on return a = lo(a)*lo(b) (via tmp1)
 * and b = hi(a)*hi(b).  Inputs are overwritten.
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    /* Low halves, sign-extended, multiplied into tmp1. */
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    /* High halves via arithmetic shift (sign-extends), product into b. */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
356
/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    /* Combine (var >> 8) & mask with (var & mask) << 8. */
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
378
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    /* Unsigned widening multiply; a and b are consumed here. */
    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    /* Pack the 32-bit halves into a caller-owned 64-bit temp. */
    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Signed 32x32->64 multiply.  Marks inputs as dead. */
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
415
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    /* A 16-bit rotate exchanges the two halfwords. */
    tcg_gen_rotri_i32(var, var, 16);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Mask off bit 15 of each half so the halfword adds cannot carry
     * into each other; the saved XOR restores the correct bit 15. */
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
441
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    /* NF is the sign bit of the value; ZF is zero iff the value is zero. */
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
476
/* dest = T0 + T1.  Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* add2 gives us the carry-out in cpu_CF directly. */
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff operands had equal
     * signs and the result's sign differs. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained add2 ops: first t0 + CF, then + t1, keeping carry. */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: widen to 64 bits and extract result + carry. */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1.  Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction carry = NOT borrow = (t0 >= t1) unsigned. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    /* Subtract-with-carry is add-with-carry of the complement. */
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
542
/*
 * Variable-amount shifts with ARM semantics: only the low byte of t1 is
 * the shift count, and counts >= 32 produce 0 (the movcond selects 0
 * when count > 0x1f, then the residual count & 0x1f is applied).
 */
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

/* Arithmetic shift right with ARM semantics: counts >= 32 act as 31
 * (clamped by the movcond), replicating the sign bit.
 */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Set CF to bit 'shift' of 'var' (the last bit shifted out). */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    tcg_gen_extract_i32(cpu_CF, var, shift, 1);
}
b26eefb6 578
/* Shift by immediate.  Includes special handling for shift == 0:
 * LSR/ASR #0 encode a 32-bit shift and ROR #0 encodes RRX.
 * If 'flags' is set, the shifter carry-out is written to CF.
 */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            /* LSR #0 means LSR #32: result 0, CF = old bit 31. */
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;     /* ASR #0 means ASR #32 */
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
          shift = 31;       /* sari by 31 replicates the sign bit */
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            /* ROR #0 encodes RRX: rotate right 1 bit through carry. */
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};
628
/* Shift 'var' by a register-supplied amount.  If 'flags' is set, use
 * the flag-updating helpers (which also write CF); otherwise use the
 * plain inline shift generators.  'shift' is consumed (freed).
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
656
/* Dispatch table for the ARM-encoding parallel add/sub ops: op2 selects
 * the operation within the signed/unsigned/saturating family chosen by
 * op1 in gen_arm_parallel_addsub() below.
 */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
/* The plain signed/unsigned variants also update the GE flags, so their
 * helpers take a pointer to CPUARMState.GE as an extra argument. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
/* Saturating/halving variants do not touch GE. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
702
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.
 * Same operation set as the ARM version, but here op1 selects the
 * operation and op2 selects the family.
 */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
/* Signed/unsigned variants update GE and need the GE pointer. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
/* Saturating/halving variants do not touch GE. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
749
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;    /* false when 'value' is a temp we must free */

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    /* Odd condition codes are the negation of the preceding even one. */
    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

/* Release the comparison value if arm_test_cc() allocated a temp for it. */
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

/* Branch to 'label' if the prepared comparison holds. */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

/* Convenience: test condition code 'cc' and branch to 'label' if true. */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
2c0262af 859
/* Per-dataprocessing-opcode flag: 1 if the op sets flags "logically"
 * (N/Z from the result, C from the shifter) rather than arithmetically.
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 878
/* Sync the in-memory IT-block (condexec) state with the translation-time
 * view, so a subsequent exception sees the correct bits.
 */
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

/* Set the PC global to an immediate value. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
893
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    /* Only store the Thumb bit if it actually changes. */
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    /* Bit 0 of the target selects the Thumb state; PC gets the rest. */
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
917
/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
972
fb602cb7
PM
/* Generate code for M-profile BXNS: all the real work, including the PC
 * update, is done by the v7m_bxns helper.
 */
static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}
991
3e3fa230
PM
/* Generate code for M-profile BLXNS; like BXNS, the heavy lifting is in
 * the v7m_blxns helper.
 */
static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->base.pc_next);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}
1005
21aeb343
JR
1006/* Variant of store_reg which uses branch&exchange logic when storing
1007 to r15 in ARM architecture v7 and above. The source must be a temporary
1008 and will be marked as dead. */
7dcc1f89 1009static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
1010{
1011 if (reg == 15 && ENABLE_ARCH_7) {
1012 gen_bx(s, var);
1013 } else {
1014 store_reg(s, reg, var);
1015 }
1016}
1017
be5e7a76
DES
1018/* Variant of store_reg which uses branch&exchange logic when storing
1019 * to r15 in ARM architecture v5T and above. This is used for storing
1020 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1021 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1022static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1023{
1024 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1025 gen_bx_excret(s, var);
be5e7a76
DES
1026 } else {
1027 store_reg(s, reg, var);
1028 }
1029}
1030
e334bd31
PB
/* Compile-time constant flag: 1 in the user-mode-only binary, so that
 * "if (!IS_USER_ONLY && ...)" system-mode-only code folds away there.
 */
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif
1036
08307563
PM
1037/* Abstractions of "generate code to do a guest load/store for
1038 * AArch32", where a vaddr is always 32 bits (and is zero
1039 * extended if we're a 64 bit core) and data is also
1040 * 32 bits unless specifically doing a 64 bit access.
1041 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 1042 * that the address argument is TCGv_i32 rather than TCGv.
08307563 1043 */
08307563 1044
/* Widen a 32-bit guest address to target vaddr width, applying the
 * SCTLR.B (BE32) address XOR munge for sub-word system-mode accesses.
 * Returns a fresh TCG temp which the caller must free.
 */
static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
1056
7f5616f5
RH
/* Generate a 32-bit-or-narrower guest load. For M-profile cores without
 * the Main Extension we force MO_ALIGN so unaligned accesses fault.
 */
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}
1071
7f5616f5
RH
/* Generate a 32-bit-or-narrower guest store; alignment handling mirrors
 * gen_aa32_ld_i32().
 */
static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}
08307563 1086
/* Define gen_aa32_ld<SUFF>() plus a gen_aa32_ld<SUFF>_iss() variant
 * which additionally records instruction-specific syndrome (ISS)
 * information via disas_set_da_iss().
 */
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}
1101
/* Store counterpart of DO_GEN_LD; the _iss variant marks the access as
 * a write in the recorded syndrome information.
 */
#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
1116
/* BE32 fixup for 64-bit values: swap the two 32-bit halves when
 * SCTLR.B is set (system mode only).
 */
static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}
1124
7f5616f5
RH
/* Generate a 64-bit guest load: widen the address, load, then apply the
 * BE32 half-swap fixup.
 */
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}
1133
/* 64-bit load with the CPU's current endianness folded in. */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}
1139
/* Generate a 64-bit guest store; in BE32 mode the halves are swapped in
 * a scratch temp first so "val" is left untouched.
 */
static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}
1156
7f5616f5
RH
/* 64-bit store with the CPU's current endianness folded in. */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}
08307563 1162
7f5616f5
RH
/* Concrete load/store generators: the suffix encodes size and
 * signedness (e.g. gen_aa32_ld16s = sign-extending halfword load).
 */
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
08307563 1171
37e6456e
PM
/* Generate code for HVC #imm16; the exception may be taken either
 * before or after the insn notionally executes.
 */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc_curr);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->base.pc_next);
    s->base.is_jmp = DISAS_HVC;
}
1189
/* Generate code for SMC. */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc_curr);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->base.pc_next);
    s->base.is_jmp = DISAS_SMC;
}
1204
/* Raise a QEMU-internal exception "excp" for the insn at "pc": sync the
 * IT-block state and PC, then mark the TB as not returning.
 */
static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}
1212
/* Raise an architectural exception "excp" with syndrome "syn", targeting
 * exception level "target_el", for the insn at "pc".
 */
static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, pc);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}
1221
/* Raise a BKPT exception with syndrome "syn" for the current insn. */
static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}
1233
1ce21ba1
RH
/* Generate an UNDEF exception for an unallocated or reserved encoding. */
static void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
1240
b5ff1b31
FB
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Point PC at the next insn and leave via DISAS_EXIT so execution
     * returns to the main loop rather than chaining directly.
     */
    tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
    s->base.is_jmp = DISAS_EXIT;
}
1247
19a6e31c
PM
static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
        return;
    }

    /* Not a semihosting trap: UNDEF as required */
    unallocated_encoding(s);
}
1273
b0109805 1274static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1275 TCGv_i32 var)
2c0262af 1276{
1e8d4eec 1277 int val, rm, shift, shiftop;
39d5492a 1278 TCGv_i32 offset;
2c0262af
FB
1279
1280 if (!(insn & (1 << 25))) {
1281 /* immediate */
1282 val = insn & 0xfff;
1283 if (!(insn & (1 << 23)))
1284 val = -val;
537730b9 1285 if (val != 0)
b0109805 1286 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1287 } else {
1288 /* shift/register */
1289 rm = (insn) & 0xf;
1290 shift = (insn >> 7) & 0x1f;
1e8d4eec 1291 shiftop = (insn >> 5) & 3;
b26eefb6 1292 offset = load_reg(s, rm);
9a119ff6 1293 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1294 if (!(insn & (1 << 23)))
b0109805 1295 tcg_gen_sub_i32(var, var, offset);
2c0262af 1296 else
b0109805 1297 tcg_gen_add_i32(var, var, offset);
7d1b0095 1298 tcg_temp_free_i32(offset);
2c0262af
FB
1299 }
1300}
1301
191f9a93 1302static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1303 int extra, TCGv_i32 var)
2c0262af
FB
1304{
1305 int val, rm;
39d5492a 1306 TCGv_i32 offset;
3b46e624 1307
2c0262af
FB
1308 if (insn & (1 << 22)) {
1309 /* immediate */
1310 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1311 if (!(insn & (1 << 23)))
1312 val = -val;
18acad92 1313 val += extra;
537730b9 1314 if (val != 0)
b0109805 1315 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1316 } else {
1317 /* register */
191f9a93 1318 if (extra)
b0109805 1319 tcg_gen_addi_i32(var, var, extra);
2c0262af 1320 rm = (insn) & 0xf;
b26eefb6 1321 offset = load_reg(s, rm);
2c0262af 1322 if (!(insn & (1 << 23)))
b0109805 1323 tcg_gen_sub_i32(var, var, offset);
2c0262af 1324 else
b0109805 1325 tcg_gen_add_i32(var, var, offset);
7d1b0095 1326 tcg_temp_free_i32(offset);
2c0262af
FB
1327 }
1328}
1329
5aaebd13
PM
1330static TCGv_ptr get_fpstatus_ptr(int neon)
1331{
1332 TCGv_ptr statusptr = tcg_temp_new_ptr();
1333 int offset;
1334 if (neon) {
0ecb72a5 1335 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1336 } else {
0ecb72a5 1337 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1338 }
1339 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1340 return statusptr;
1341}
1342
c39c2b90 1343static inline long vfp_reg_offset(bool dp, unsigned reg)
8e96005d 1344{
9a2b5256 1345 if (dp) {
c39c2b90 1346 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
8e96005d 1347 } else {
c39c2b90 1348 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
9a2b5256
RH
1349 if (reg & 1) {
1350 ofs += offsetof(CPU_DoubleU, l.upper);
1351 } else {
1352 ofs += offsetof(CPU_DoubleU, l.lower);
1353 }
1354 return ofs;
8e96005d
FB
1355 }
1356}
9ee6e8bb
PB
1357
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Each D register holds two 32-bit pieces, mapped onto S registers. */
    return vfp_reg_offset(0, reg * 2 + n);
}
1367
32f91fb7
RH
/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     * (No adjustment is needed for 8-byte (or larger) elements.)
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}
1386
/* Load 32-bit piece "pass" of NEON register "reg" into a fresh temp
 * (caller frees).
 */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
1393
2d6ac920
RH
/* Zero-extending load of NEON element "ele" (size from mop) into a
 * 32-bit TCG value.
 */
static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
1412
ac55d007
RH
/* As neon_load_element, but producing a 64-bit TCG value. */
static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, cpu_env, offset);
        break;
    case MO_Q:
        tcg_gen_ld_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
1434
/* Store var into 32-bit piece "pass" of NEON register "reg"; consumes
 * (frees) var.
 */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1440
2d6ac920
RH
/* Store the low 2**size bytes of a 32-bit value into NEON element "ele".
 * Does not free var.
 */
static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
1459
ac55d007
RH
/* As neon_store_element, but taking a 64-bit source value. */
static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, cpu_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
1481
/* Load D register "reg" into var. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1486
/* Store var into D register "reg" (var is not freed). */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1491
160f3b64
PM
/* Load single-precision register "reg" into var. */
static inline void neon_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}
1496
/* Store var into single-precision register "reg" (var is not freed). */
static inline void neon_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}
1501
1a66ac61
RH
/* Return a fresh TCG pointer temp addressing VFP register "reg"
 * (caller frees).
 */
static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}
1508
/* Bit 20 of a coprocessor insn: set for reads (e.g. TMRRC below). */
#define ARM_CP_RW_BIT   (1 << 20)
18c9b560 1510
78e138bc
PM
1511/* Include the VFP decoder */
1512#include "translate-vfp.inc.c"
1513
/* Load iwMMXt data register wR[reg] into var. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1518
/* Store var into iwMMXt data register wR[reg] (var is not freed). */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1523
/* Load iwMMXt control register wC[reg] into a fresh temp (caller frees). */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}
1530
/* Store var into iwMMXt control register wC[reg]; consumes (frees) var. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1536
/* wRn = M0 (the iwMMXt working register) */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1541
/* M0 = wRn */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1546
/* M0 |= wRn */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1552
/* M0 &= wRn */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1558
/* M0 ^= wRn */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1564
/* Define gen_op_iwmmxt_<name>_M0_wRn(): M0 = helper(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, for helpers that additionally take cpu_env. */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long variants of an env-taking helper. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Unary env-taking variant: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1589
/* Binary wRn ops that don't need cpu_env */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Ops whose helpers also take cpu_env */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1644
e677137d
PB
/* Set bit 1 ("MUP") of the wCon control register. */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1652
/* Set bit 0 ("CUP") of the wCon control register. */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1660
/* Recompute wCASF N/Z flags from the value in M0 (via helper). */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1667
/* M0 += zero_extend(low 32 bits of wRn) */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1674
39d5492a
PM
/* Decode the addressing mode of an iwMMXt load/store insn and compute
 * the effective address into "dest", performing any pre/post-indexed
 * writeback to the base register. Returns 1 for an invalid encoding,
 * 0 on success.
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    /* 8-bit immediate, scaled x4 when insn bit 8 is set */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);  /* writeback */
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        /* NOTE(review): in the remaining (unindexed, U=1) case dest is
         * never written and tmp is not freed -- confirm callers cannot
         * reach here with that encoding.
         */
        return 1;
    return 0;
}
1709
/* Fetch the shift amount for an iwMMXt shift insn into "dest", masked
 * by "mask". Insn bit 8 selects a wCGR0..3 control register source
 * (any other control register is invalid); otherwise the low 32 bits of
 * general register wR[rd] are used. Returns 0 on success, 1 for an
 * invalid encoding.
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1731
a1c7273b 1732/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1733 (ie. an undefined instruction). */
7dcc1f89 1734static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1735{
1736 int rd, wrd;
1737 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1738 TCGv_i32 addr;
1739 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1740
1741 if ((insn & 0x0e000e00) == 0x0c000000) {
1742 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1743 wrd = insn & 0xf;
1744 rdlo = (insn >> 12) & 0xf;
1745 rdhi = (insn >> 16) & 0xf;
d00584b7 1746 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1747 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1748 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
664b7e3b 1749 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
d00584b7 1750 } else { /* TMCRR */
da6b5335
FN
1751 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1752 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1753 gen_op_iwmmxt_set_mup();
1754 }
1755 return 0;
1756 }
1757
1758 wrd = (insn >> 12) & 0xf;
7d1b0095 1759 addr = tcg_temp_new_i32();
da6b5335 1760 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1761 tcg_temp_free_i32(addr);
18c9b560 1762 return 1;
da6b5335 1763 }
18c9b560 1764 if (insn & ARM_CP_RW_BIT) {
d00584b7 1765 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1766 tmp = tcg_temp_new_i32();
12dcc321 1767 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1768 iwmmxt_store_creg(wrd, tmp);
18c9b560 1769 } else {
e677137d
PB
1770 i = 1;
1771 if (insn & (1 << 8)) {
d00584b7 1772 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1773 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1774 i = 0;
d00584b7 1775 } else { /* WLDRW wRd */
29531141 1776 tmp = tcg_temp_new_i32();
12dcc321 1777 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1778 }
1779 } else {
29531141 1780 tmp = tcg_temp_new_i32();
d00584b7 1781 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1782 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
d00584b7 1783 } else { /* WLDRB */
12dcc321 1784 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1785 }
1786 }
1787 if (i) {
1788 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1789 tcg_temp_free_i32(tmp);
e677137d 1790 }
18c9b560
AZ
1791 gen_op_iwmmxt_movq_wRn_M0(wrd);
1792 }
1793 } else {
d00584b7 1794 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1795 tmp = iwmmxt_load_creg(wrd);
12dcc321 1796 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1797 } else {
1798 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1799 tmp = tcg_temp_new_i32();
e677137d 1800 if (insn & (1 << 8)) {
d00584b7 1801 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1802 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
d00584b7 1803 } else { /* WSTRW wRd */
ecc7b3aa 1804 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1805 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1806 }
1807 } else {
d00584b7 1808 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1809 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1810 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
d00584b7 1811 } else { /* WSTRB */
ecc7b3aa 1812 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1813 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1814 }
1815 }
18c9b560 1816 }
29531141 1817 tcg_temp_free_i32(tmp);
18c9b560 1818 }
7d1b0095 1819 tcg_temp_free_i32(addr);
18c9b560
AZ
1820 return 0;
1821 }
1822
1823 if ((insn & 0x0f000000) != 0x0e000000)
1824 return 1;
1825
1826 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
d00584b7 1827 case 0x000: /* WOR */
18c9b560
AZ
1828 wrd = (insn >> 12) & 0xf;
1829 rd0 = (insn >> 0) & 0xf;
1830 rd1 = (insn >> 16) & 0xf;
1831 gen_op_iwmmxt_movq_M0_wRn(rd0);
1832 gen_op_iwmmxt_orq_M0_wRn(rd1);
1833 gen_op_iwmmxt_setpsr_nz();
1834 gen_op_iwmmxt_movq_wRn_M0(wrd);
1835 gen_op_iwmmxt_set_mup();
1836 gen_op_iwmmxt_set_cup();
1837 break;
d00584b7 1838 case 0x011: /* TMCR */
18c9b560
AZ
1839 if (insn & 0xf)
1840 return 1;
1841 rd = (insn >> 12) & 0xf;
1842 wrd = (insn >> 16) & 0xf;
1843 switch (wrd) {
1844 case ARM_IWMMXT_wCID:
1845 case ARM_IWMMXT_wCASF:
1846 break;
1847 case ARM_IWMMXT_wCon:
1848 gen_op_iwmmxt_set_cup();
1849 /* Fall through. */
1850 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1851 tmp = iwmmxt_load_creg(wrd);
1852 tmp2 = load_reg(s, rd);
f669df27 1853 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1854 tcg_temp_free_i32(tmp2);
da6b5335 1855 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1856 break;
1857 case ARM_IWMMXT_wCGR0:
1858 case ARM_IWMMXT_wCGR1:
1859 case ARM_IWMMXT_wCGR2:
1860 case ARM_IWMMXT_wCGR3:
1861 gen_op_iwmmxt_set_cup();
da6b5335
FN
1862 tmp = load_reg(s, rd);
1863 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1864 break;
1865 default:
1866 return 1;
1867 }
1868 break;
d00584b7 1869 case 0x100: /* WXOR */
18c9b560
AZ
1870 wrd = (insn >> 12) & 0xf;
1871 rd0 = (insn >> 0) & 0xf;
1872 rd1 = (insn >> 16) & 0xf;
1873 gen_op_iwmmxt_movq_M0_wRn(rd0);
1874 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1875 gen_op_iwmmxt_setpsr_nz();
1876 gen_op_iwmmxt_movq_wRn_M0(wrd);
1877 gen_op_iwmmxt_set_mup();
1878 gen_op_iwmmxt_set_cup();
1879 break;
d00584b7 1880 case 0x111: /* TMRC */
18c9b560
AZ
1881 if (insn & 0xf)
1882 return 1;
1883 rd = (insn >> 12) & 0xf;
1884 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1885 tmp = iwmmxt_load_creg(wrd);
1886 store_reg(s, rd, tmp);
18c9b560 1887 break;
d00584b7 1888 case 0x300: /* WANDN */
18c9b560
AZ
1889 wrd = (insn >> 12) & 0xf;
1890 rd0 = (insn >> 0) & 0xf;
1891 rd1 = (insn >> 16) & 0xf;
1892 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1893 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1894 gen_op_iwmmxt_andq_M0_wRn(rd1);
1895 gen_op_iwmmxt_setpsr_nz();
1896 gen_op_iwmmxt_movq_wRn_M0(wrd);
1897 gen_op_iwmmxt_set_mup();
1898 gen_op_iwmmxt_set_cup();
1899 break;
d00584b7 1900 case 0x200: /* WAND */
18c9b560
AZ
1901 wrd = (insn >> 12) & 0xf;
1902 rd0 = (insn >> 0) & 0xf;
1903 rd1 = (insn >> 16) & 0xf;
1904 gen_op_iwmmxt_movq_M0_wRn(rd0);
1905 gen_op_iwmmxt_andq_M0_wRn(rd1);
1906 gen_op_iwmmxt_setpsr_nz();
1907 gen_op_iwmmxt_movq_wRn_M0(wrd);
1908 gen_op_iwmmxt_set_mup();
1909 gen_op_iwmmxt_set_cup();
1910 break;
d00584b7 1911 case 0x810: case 0xa10: /* WMADD */
18c9b560
AZ
1912 wrd = (insn >> 12) & 0xf;
1913 rd0 = (insn >> 0) & 0xf;
1914 rd1 = (insn >> 16) & 0xf;
1915 gen_op_iwmmxt_movq_M0_wRn(rd0);
1916 if (insn & (1 << 21))
1917 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1918 else
1919 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1920 gen_op_iwmmxt_movq_wRn_M0(wrd);
1921 gen_op_iwmmxt_set_mup();
1922 break;
d00584b7 1923 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
18c9b560
AZ
1924 wrd = (insn >> 12) & 0xf;
1925 rd0 = (insn >> 16) & 0xf;
1926 rd1 = (insn >> 0) & 0xf;
1927 gen_op_iwmmxt_movq_M0_wRn(rd0);
1928 switch ((insn >> 22) & 3) {
1929 case 0:
1930 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1931 break;
1932 case 1:
1933 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1934 break;
1935 case 2:
1936 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1937 break;
1938 case 3:
1939 return 1;
1940 }
1941 gen_op_iwmmxt_movq_wRn_M0(wrd);
1942 gen_op_iwmmxt_set_mup();
1943 gen_op_iwmmxt_set_cup();
1944 break;
d00584b7 1945 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
18c9b560
AZ
1946 wrd = (insn >> 12) & 0xf;
1947 rd0 = (insn >> 16) & 0xf;
1948 rd1 = (insn >> 0) & 0xf;
1949 gen_op_iwmmxt_movq_M0_wRn(rd0);
1950 switch ((insn >> 22) & 3) {
1951 case 0:
1952 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1953 break;
1954 case 1:
1955 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1956 break;
1957 case 2:
1958 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1959 break;
1960 case 3:
1961 return 1;
1962 }
1963 gen_op_iwmmxt_movq_wRn_M0(wrd);
1964 gen_op_iwmmxt_set_mup();
1965 gen_op_iwmmxt_set_cup();
1966 break;
d00584b7 1967 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
18c9b560
AZ
1968 wrd = (insn >> 12) & 0xf;
1969 rd0 = (insn >> 16) & 0xf;
1970 rd1 = (insn >> 0) & 0xf;
1971 gen_op_iwmmxt_movq_M0_wRn(rd0);
1972 if (insn & (1 << 22))
1973 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1974 else
1975 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1976 if (!(insn & (1 << 20)))
1977 gen_op_iwmmxt_addl_M0_wRn(wrd);
1978 gen_op_iwmmxt_movq_wRn_M0(wrd);
1979 gen_op_iwmmxt_set_mup();
1980 break;
d00584b7 1981 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
18c9b560
AZ
1982 wrd = (insn >> 12) & 0xf;
1983 rd0 = (insn >> 16) & 0xf;
1984 rd1 = (insn >> 0) & 0xf;
1985 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1986 if (insn & (1 << 21)) {
1987 if (insn & (1 << 20))
1988 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1989 else
1990 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1991 } else {
1992 if (insn & (1 << 20))
1993 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1994 else
1995 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1996 }
18c9b560
AZ
1997 gen_op_iwmmxt_movq_wRn_M0(wrd);
1998 gen_op_iwmmxt_set_mup();
1999 break;
d00584b7 2000 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
18c9b560
AZ
2001 wrd = (insn >> 12) & 0xf;
2002 rd0 = (insn >> 16) & 0xf;
2003 rd1 = (insn >> 0) & 0xf;
2004 gen_op_iwmmxt_movq_M0_wRn(rd0);
2005 if (insn & (1 << 21))
2006 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2007 else
2008 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2009 if (!(insn & (1 << 20))) {
e677137d
PB
2010 iwmmxt_load_reg(cpu_V1, wrd);
2011 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2012 }
2013 gen_op_iwmmxt_movq_wRn_M0(wrd);
2014 gen_op_iwmmxt_set_mup();
2015 break;
d00584b7 2016 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
18c9b560
AZ
2017 wrd = (insn >> 12) & 0xf;
2018 rd0 = (insn >> 16) & 0xf;
2019 rd1 = (insn >> 0) & 0xf;
2020 gen_op_iwmmxt_movq_M0_wRn(rd0);
2021 switch ((insn >> 22) & 3) {
2022 case 0:
2023 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2024 break;
2025 case 1:
2026 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2027 break;
2028 case 2:
2029 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2030 break;
2031 case 3:
2032 return 1;
2033 }
2034 gen_op_iwmmxt_movq_wRn_M0(wrd);
2035 gen_op_iwmmxt_set_mup();
2036 gen_op_iwmmxt_set_cup();
2037 break;
d00584b7 2038 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
18c9b560
AZ
2039 wrd = (insn >> 12) & 0xf;
2040 rd0 = (insn >> 16) & 0xf;
2041 rd1 = (insn >> 0) & 0xf;
2042 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2043 if (insn & (1 << 22)) {
2044 if (insn & (1 << 20))
2045 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2046 else
2047 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2048 } else {
2049 if (insn & (1 << 20))
2050 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2051 else
2052 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2053 }
18c9b560
AZ
2054 gen_op_iwmmxt_movq_wRn_M0(wrd);
2055 gen_op_iwmmxt_set_mup();
2056 gen_op_iwmmxt_set_cup();
2057 break;
d00584b7 2058 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
18c9b560
AZ
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 rd1 = (insn >> 0) & 0xf;
2062 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2063 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2064 tcg_gen_andi_i32(tmp, tmp, 7);
2065 iwmmxt_load_reg(cpu_V1, rd1);
2066 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2067 tcg_temp_free_i32(tmp);
18c9b560
AZ
2068 gen_op_iwmmxt_movq_wRn_M0(wrd);
2069 gen_op_iwmmxt_set_mup();
2070 break;
d00584b7 2071 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2072 if (((insn >> 6) & 3) == 3)
2073 return 1;
18c9b560
AZ
2074 rd = (insn >> 12) & 0xf;
2075 wrd = (insn >> 16) & 0xf;
da6b5335 2076 tmp = load_reg(s, rd);
18c9b560
AZ
2077 gen_op_iwmmxt_movq_M0_wRn(wrd);
2078 switch ((insn >> 6) & 3) {
2079 case 0:
da6b5335
FN
2080 tmp2 = tcg_const_i32(0xff);
2081 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2082 break;
2083 case 1:
da6b5335
FN
2084 tmp2 = tcg_const_i32(0xffff);
2085 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2086 break;
2087 case 2:
da6b5335
FN
2088 tmp2 = tcg_const_i32(0xffffffff);
2089 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2090 break;
da6b5335 2091 default:
f764718d
RH
2092 tmp2 = NULL;
2093 tmp3 = NULL;
18c9b560 2094 }
da6b5335 2095 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2096 tcg_temp_free_i32(tmp3);
2097 tcg_temp_free_i32(tmp2);
7d1b0095 2098 tcg_temp_free_i32(tmp);
18c9b560
AZ
2099 gen_op_iwmmxt_movq_wRn_M0(wrd);
2100 gen_op_iwmmxt_set_mup();
2101 break;
d00584b7 2102 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
18c9b560
AZ
2103 rd = (insn >> 12) & 0xf;
2104 wrd = (insn >> 16) & 0xf;
da6b5335 2105 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2106 return 1;
2107 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2108 tmp = tcg_temp_new_i32();
18c9b560
AZ
2109 switch ((insn >> 22) & 3) {
2110 case 0:
da6b5335 2111 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2112 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2113 if (insn & 8) {
2114 tcg_gen_ext8s_i32(tmp, tmp);
2115 } else {
2116 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2117 }
2118 break;
2119 case 1:
da6b5335 2120 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2121 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2122 if (insn & 8) {
2123 tcg_gen_ext16s_i32(tmp, tmp);
2124 } else {
2125 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2126 }
2127 break;
2128 case 2:
da6b5335 2129 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2130 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2131 break;
18c9b560 2132 }
da6b5335 2133 store_reg(s, rd, tmp);
18c9b560 2134 break;
d00584b7 2135 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2136 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2137 return 1;
da6b5335 2138 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2139 switch ((insn >> 22) & 3) {
2140 case 0:
da6b5335 2141 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2142 break;
2143 case 1:
da6b5335 2144 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2145 break;
2146 case 2:
da6b5335 2147 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2148 break;
18c9b560 2149 }
da6b5335
FN
2150 tcg_gen_shli_i32(tmp, tmp, 28);
2151 gen_set_nzcv(tmp);
7d1b0095 2152 tcg_temp_free_i32(tmp);
18c9b560 2153 break;
d00584b7 2154 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2155 if (((insn >> 6) & 3) == 3)
2156 return 1;
18c9b560
AZ
2157 rd = (insn >> 12) & 0xf;
2158 wrd = (insn >> 16) & 0xf;
da6b5335 2159 tmp = load_reg(s, rd);
18c9b560
AZ
2160 switch ((insn >> 6) & 3) {
2161 case 0:
da6b5335 2162 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2163 break;
2164 case 1:
da6b5335 2165 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2166 break;
2167 case 2:
da6b5335 2168 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2169 break;
18c9b560 2170 }
7d1b0095 2171 tcg_temp_free_i32(tmp);
18c9b560
AZ
2172 gen_op_iwmmxt_movq_wRn_M0(wrd);
2173 gen_op_iwmmxt_set_mup();
2174 break;
d00584b7 2175 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2176 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2177 return 1;
da6b5335 2178 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2179 tmp2 = tcg_temp_new_i32();
da6b5335 2180 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2181 switch ((insn >> 22) & 3) {
2182 case 0:
2183 for (i = 0; i < 7; i ++) {
da6b5335
FN
2184 tcg_gen_shli_i32(tmp2, tmp2, 4);
2185 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2186 }
2187 break;
2188 case 1:
2189 for (i = 0; i < 3; i ++) {
da6b5335
FN
2190 tcg_gen_shli_i32(tmp2, tmp2, 8);
2191 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2192 }
2193 break;
2194 case 2:
da6b5335
FN
2195 tcg_gen_shli_i32(tmp2, tmp2, 16);
2196 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2197 break;
18c9b560 2198 }
da6b5335 2199 gen_set_nzcv(tmp);
7d1b0095
PM
2200 tcg_temp_free_i32(tmp2);
2201 tcg_temp_free_i32(tmp);
18c9b560 2202 break;
d00584b7 2203 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
18c9b560
AZ
2204 wrd = (insn >> 12) & 0xf;
2205 rd0 = (insn >> 16) & 0xf;
2206 gen_op_iwmmxt_movq_M0_wRn(rd0);
2207 switch ((insn >> 22) & 3) {
2208 case 0:
e677137d 2209 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2210 break;
2211 case 1:
e677137d 2212 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2213 break;
2214 case 2:
e677137d 2215 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2216 break;
2217 case 3:
2218 return 1;
2219 }
2220 gen_op_iwmmxt_movq_wRn_M0(wrd);
2221 gen_op_iwmmxt_set_mup();
2222 break;
d00584b7 2223 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2224 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2225 return 1;
da6b5335 2226 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2227 tmp2 = tcg_temp_new_i32();
da6b5335 2228 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2229 switch ((insn >> 22) & 3) {
2230 case 0:
2231 for (i = 0; i < 7; i ++) {
da6b5335
FN
2232 tcg_gen_shli_i32(tmp2, tmp2, 4);
2233 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2234 }
2235 break;
2236 case 1:
2237 for (i = 0; i < 3; i ++) {
da6b5335
FN
2238 tcg_gen_shli_i32(tmp2, tmp2, 8);
2239 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2240 }
2241 break;
2242 case 2:
da6b5335
FN
2243 tcg_gen_shli_i32(tmp2, tmp2, 16);
2244 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2245 break;
18c9b560 2246 }
da6b5335 2247 gen_set_nzcv(tmp);
7d1b0095
PM
2248 tcg_temp_free_i32(tmp2);
2249 tcg_temp_free_i32(tmp);
18c9b560 2250 break;
d00584b7 2251 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
18c9b560
AZ
2252 rd = (insn >> 12) & 0xf;
2253 rd0 = (insn >> 16) & 0xf;
da6b5335 2254 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2255 return 1;
2256 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2257 tmp = tcg_temp_new_i32();
18c9b560
AZ
2258 switch ((insn >> 22) & 3) {
2259 case 0:
da6b5335 2260 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2261 break;
2262 case 1:
da6b5335 2263 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2264 break;
2265 case 2:
da6b5335 2266 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2267 break;
18c9b560 2268 }
da6b5335 2269 store_reg(s, rd, tmp);
18c9b560 2270 break;
d00584b7 2271 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
18c9b560
AZ
2272 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2273 wrd = (insn >> 12) & 0xf;
2274 rd0 = (insn >> 16) & 0xf;
2275 rd1 = (insn >> 0) & 0xf;
2276 gen_op_iwmmxt_movq_M0_wRn(rd0);
2277 switch ((insn >> 22) & 3) {
2278 case 0:
2279 if (insn & (1 << 21))
2280 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2281 else
2282 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2283 break;
2284 case 1:
2285 if (insn & (1 << 21))
2286 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2287 else
2288 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2289 break;
2290 case 2:
2291 if (insn & (1 << 21))
2292 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2293 else
2294 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2295 break;
2296 case 3:
2297 return 1;
2298 }
2299 gen_op_iwmmxt_movq_wRn_M0(wrd);
2300 gen_op_iwmmxt_set_mup();
2301 gen_op_iwmmxt_set_cup();
2302 break;
d00584b7 2303 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
18c9b560
AZ
2304 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2305 wrd = (insn >> 12) & 0xf;
2306 rd0 = (insn >> 16) & 0xf;
2307 gen_op_iwmmxt_movq_M0_wRn(rd0);
2308 switch ((insn >> 22) & 3) {
2309 case 0:
2310 if (insn & (1 << 21))
2311 gen_op_iwmmxt_unpacklsb_M0();
2312 else
2313 gen_op_iwmmxt_unpacklub_M0();
2314 break;
2315 case 1:
2316 if (insn & (1 << 21))
2317 gen_op_iwmmxt_unpacklsw_M0();
2318 else
2319 gen_op_iwmmxt_unpackluw_M0();
2320 break;
2321 case 2:
2322 if (insn & (1 << 21))
2323 gen_op_iwmmxt_unpacklsl_M0();
2324 else
2325 gen_op_iwmmxt_unpacklul_M0();
2326 break;
2327 case 3:
2328 return 1;
2329 }
2330 gen_op_iwmmxt_movq_wRn_M0(wrd);
2331 gen_op_iwmmxt_set_mup();
2332 gen_op_iwmmxt_set_cup();
2333 break;
d00584b7 2334 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
18c9b560
AZ
2335 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2336 wrd = (insn >> 12) & 0xf;
2337 rd0 = (insn >> 16) & 0xf;
2338 gen_op_iwmmxt_movq_M0_wRn(rd0);
2339 switch ((insn >> 22) & 3) {
2340 case 0:
2341 if (insn & (1 << 21))
2342 gen_op_iwmmxt_unpackhsb_M0();
2343 else
2344 gen_op_iwmmxt_unpackhub_M0();
2345 break;
2346 case 1:
2347 if (insn & (1 << 21))
2348 gen_op_iwmmxt_unpackhsw_M0();
2349 else
2350 gen_op_iwmmxt_unpackhuw_M0();
2351 break;
2352 case 2:
2353 if (insn & (1 << 21))
2354 gen_op_iwmmxt_unpackhsl_M0();
2355 else
2356 gen_op_iwmmxt_unpackhul_M0();
2357 break;
2358 case 3:
2359 return 1;
2360 }
2361 gen_op_iwmmxt_movq_wRn_M0(wrd);
2362 gen_op_iwmmxt_set_mup();
2363 gen_op_iwmmxt_set_cup();
2364 break;
d00584b7 2365 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
18c9b560 2366 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2367 if (((insn >> 22) & 3) == 0)
2368 return 1;
18c9b560
AZ
2369 wrd = (insn >> 12) & 0xf;
2370 rd0 = (insn >> 16) & 0xf;
2371 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2372 tmp = tcg_temp_new_i32();
da6b5335 2373 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2374 tcg_temp_free_i32(tmp);
18c9b560 2375 return 1;
da6b5335 2376 }
18c9b560 2377 switch ((insn >> 22) & 3) {
18c9b560 2378 case 1:
477955bd 2379 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2380 break;
2381 case 2:
477955bd 2382 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2383 break;
2384 case 3:
477955bd 2385 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2386 break;
2387 }
7d1b0095 2388 tcg_temp_free_i32(tmp);
18c9b560
AZ
2389 gen_op_iwmmxt_movq_wRn_M0(wrd);
2390 gen_op_iwmmxt_set_mup();
2391 gen_op_iwmmxt_set_cup();
2392 break;
d00584b7 2393 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
18c9b560 2394 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2395 if (((insn >> 22) & 3) == 0)
2396 return 1;
18c9b560
AZ
2397 wrd = (insn >> 12) & 0xf;
2398 rd0 = (insn >> 16) & 0xf;
2399 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2400 tmp = tcg_temp_new_i32();
da6b5335 2401 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2402 tcg_temp_free_i32(tmp);
18c9b560 2403 return 1;
da6b5335 2404 }
18c9b560 2405 switch ((insn >> 22) & 3) {
18c9b560 2406 case 1:
477955bd 2407 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2408 break;
2409 case 2:
477955bd 2410 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2411 break;
2412 case 3:
477955bd 2413 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2414 break;
2415 }
7d1b0095 2416 tcg_temp_free_i32(tmp);
18c9b560
AZ
2417 gen_op_iwmmxt_movq_wRn_M0(wrd);
2418 gen_op_iwmmxt_set_mup();
2419 gen_op_iwmmxt_set_cup();
2420 break;
d00584b7 2421 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
18c9b560 2422 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2423 if (((insn >> 22) & 3) == 0)
2424 return 1;
18c9b560
AZ
2425 wrd = (insn >> 12) & 0xf;
2426 rd0 = (insn >> 16) & 0xf;
2427 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2428 tmp = tcg_temp_new_i32();
da6b5335 2429 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2430 tcg_temp_free_i32(tmp);
18c9b560 2431 return 1;
da6b5335 2432 }
18c9b560 2433 switch ((insn >> 22) & 3) {
18c9b560 2434 case 1:
477955bd 2435 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2436 break;
2437 case 2:
477955bd 2438 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2439 break;
2440 case 3:
477955bd 2441 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2442 break;
2443 }
7d1b0095 2444 tcg_temp_free_i32(tmp);
18c9b560
AZ
2445 gen_op_iwmmxt_movq_wRn_M0(wrd);
2446 gen_op_iwmmxt_set_mup();
2447 gen_op_iwmmxt_set_cup();
2448 break;
d00584b7 2449 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
18c9b560 2450 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2451 if (((insn >> 22) & 3) == 0)
2452 return 1;
18c9b560
AZ
2453 wrd = (insn >> 12) & 0xf;
2454 rd0 = (insn >> 16) & 0xf;
2455 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2456 tmp = tcg_temp_new_i32();
18c9b560 2457 switch ((insn >> 22) & 3) {
18c9b560 2458 case 1:
da6b5335 2459 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2460 tcg_temp_free_i32(tmp);
18c9b560 2461 return 1;
da6b5335 2462 }
477955bd 2463 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2464 break;
2465 case 2:
da6b5335 2466 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2467 tcg_temp_free_i32(tmp);
18c9b560 2468 return 1;
da6b5335 2469 }
477955bd 2470 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2471 break;
2472 case 3:
da6b5335 2473 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2474 tcg_temp_free_i32(tmp);
18c9b560 2475 return 1;
da6b5335 2476 }
477955bd 2477 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2478 break;
2479 }
7d1b0095 2480 tcg_temp_free_i32(tmp);
18c9b560
AZ
2481 gen_op_iwmmxt_movq_wRn_M0(wrd);
2482 gen_op_iwmmxt_set_mup();
2483 gen_op_iwmmxt_set_cup();
2484 break;
d00584b7 2485 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
18c9b560
AZ
2486 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2487 wrd = (insn >> 12) & 0xf;
2488 rd0 = (insn >> 16) & 0xf;
2489 rd1 = (insn >> 0) & 0xf;
2490 gen_op_iwmmxt_movq_M0_wRn(rd0);
2491 switch ((insn >> 22) & 3) {
2492 case 0:
2493 if (insn & (1 << 21))
2494 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2495 else
2496 gen_op_iwmmxt_minub_M0_wRn(rd1);
2497 break;
2498 case 1:
2499 if (insn & (1 << 21))
2500 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2501 else
2502 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2503 break;
2504 case 2:
2505 if (insn & (1 << 21))
2506 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2507 else
2508 gen_op_iwmmxt_minul_M0_wRn(rd1);
2509 break;
2510 case 3:
2511 return 1;
2512 }
2513 gen_op_iwmmxt_movq_wRn_M0(wrd);
2514 gen_op_iwmmxt_set_mup();
2515 break;
d00584b7 2516 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
18c9b560
AZ
2517 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2518 wrd = (insn >> 12) & 0xf;
2519 rd0 = (insn >> 16) & 0xf;
2520 rd1 = (insn >> 0) & 0xf;
2521 gen_op_iwmmxt_movq_M0_wRn(rd0);
2522 switch ((insn >> 22) & 3) {
2523 case 0:
2524 if (insn & (1 << 21))
2525 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2526 else
2527 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2528 break;
2529 case 1:
2530 if (insn & (1 << 21))
2531 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2532 else
2533 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2534 break;
2535 case 2:
2536 if (insn & (1 << 21))
2537 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2538 else
2539 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2540 break;
2541 case 3:
2542 return 1;
2543 }
2544 gen_op_iwmmxt_movq_wRn_M0(wrd);
2545 gen_op_iwmmxt_set_mup();
2546 break;
d00584b7 2547 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
18c9b560
AZ
2548 case 0x402: case 0x502: case 0x602: case 0x702:
2549 wrd = (insn >> 12) & 0xf;
2550 rd0 = (insn >> 16) & 0xf;
2551 rd1 = (insn >> 0) & 0xf;
2552 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2553 tmp = tcg_const_i32((insn >> 20) & 3);
2554 iwmmxt_load_reg(cpu_V1, rd1);
2555 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2556 tcg_temp_free_i32(tmp);
18c9b560
AZ
2557 gen_op_iwmmxt_movq_wRn_M0(wrd);
2558 gen_op_iwmmxt_set_mup();
2559 break;
d00584b7 2560 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
18c9b560
AZ
2561 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2562 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2563 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2564 wrd = (insn >> 12) & 0xf;
2565 rd0 = (insn >> 16) & 0xf;
2566 rd1 = (insn >> 0) & 0xf;
2567 gen_op_iwmmxt_movq_M0_wRn(rd0);
2568 switch ((insn >> 20) & 0xf) {
2569 case 0x0:
2570 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2571 break;
2572 case 0x1:
2573 gen_op_iwmmxt_subub_M0_wRn(rd1);
2574 break;
2575 case 0x3:
2576 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2577 break;
2578 case 0x4:
2579 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2580 break;
2581 case 0x5:
2582 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2583 break;
2584 case 0x7:
2585 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2586 break;
2587 case 0x8:
2588 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2589 break;
2590 case 0x9:
2591 gen_op_iwmmxt_subul_M0_wRn(rd1);
2592 break;
2593 case 0xb:
2594 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2595 break;
2596 default:
2597 return 1;
2598 }
2599 gen_op_iwmmxt_movq_wRn_M0(wrd);
2600 gen_op_iwmmxt_set_mup();
2601 gen_op_iwmmxt_set_cup();
2602 break;
d00584b7 2603 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
18c9b560
AZ
2604 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2605 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2606 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2607 wrd = (insn >> 12) & 0xf;
2608 rd0 = (insn >> 16) & 0xf;
2609 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2610 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2611 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2612 tcg_temp_free_i32(tmp);
18c9b560
AZ
2613 gen_op_iwmmxt_movq_wRn_M0(wrd);
2614 gen_op_iwmmxt_set_mup();
2615 gen_op_iwmmxt_set_cup();
2616 break;
d00584b7 2617 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
18c9b560
AZ
2618 case 0x418: case 0x518: case 0x618: case 0x718:
2619 case 0x818: case 0x918: case 0xa18: case 0xb18:
2620 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2621 wrd = (insn >> 12) & 0xf;
2622 rd0 = (insn >> 16) & 0xf;
2623 rd1 = (insn >> 0) & 0xf;
2624 gen_op_iwmmxt_movq_M0_wRn(rd0);
2625 switch ((insn >> 20) & 0xf) {
2626 case 0x0:
2627 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2628 break;
2629 case 0x1:
2630 gen_op_iwmmxt_addub_M0_wRn(rd1);
2631 break;
2632 case 0x3:
2633 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2634 break;
2635 case 0x4:
2636 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2637 break;
2638 case 0x5:
2639 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2640 break;
2641 case 0x7:
2642 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2643 break;
2644 case 0x8:
2645 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2646 break;
2647 case 0x9:
2648 gen_op_iwmmxt_addul_M0_wRn(rd1);
2649 break;
2650 case 0xb:
2651 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2652 break;
2653 default:
2654 return 1;
2655 }
2656 gen_op_iwmmxt_movq_wRn_M0(wrd);
2657 gen_op_iwmmxt_set_mup();
2658 gen_op_iwmmxt_set_cup();
2659 break;
d00584b7 2660 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
18c9b560
AZ
2661 case 0x408: case 0x508: case 0x608: case 0x708:
2662 case 0x808: case 0x908: case 0xa08: case 0xb08:
2663 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2664 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2665 return 1;
18c9b560
AZ
2666 wrd = (insn >> 12) & 0xf;
2667 rd0 = (insn >> 16) & 0xf;
2668 rd1 = (insn >> 0) & 0xf;
2669 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2670 switch ((insn >> 22) & 3) {
18c9b560
AZ
2671 case 1:
2672 if (insn & (1 << 21))
2673 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2674 else
2675 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2676 break;
2677 case 2:
2678 if (insn & (1 << 21))
2679 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2680 else
2681 gen_op_iwmmxt_packul_M0_wRn(rd1);
2682 break;
2683 case 3:
2684 if (insn & (1 << 21))
2685 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2686 else
2687 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2688 break;
2689 }
2690 gen_op_iwmmxt_movq_wRn_M0(wrd);
2691 gen_op_iwmmxt_set_mup();
2692 gen_op_iwmmxt_set_cup();
2693 break;
2694 case 0x201: case 0x203: case 0x205: case 0x207:
2695 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2696 case 0x211: case 0x213: case 0x215: case 0x217:
2697 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2698 wrd = (insn >> 5) & 0xf;
2699 rd0 = (insn >> 12) & 0xf;
2700 rd1 = (insn >> 0) & 0xf;
2701 if (rd0 == 0xf || rd1 == 0xf)
2702 return 1;
2703 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2704 tmp = load_reg(s, rd0);
2705 tmp2 = load_reg(s, rd1);
18c9b560 2706 switch ((insn >> 16) & 0xf) {
d00584b7 2707 case 0x0: /* TMIA */
da6b5335 2708 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2709 break;
d00584b7 2710 case 0x8: /* TMIAPH */
da6b5335 2711 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2712 break;
d00584b7 2713 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2714 if (insn & (1 << 16))
da6b5335 2715 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2716 if (insn & (1 << 17))
da6b5335
FN
2717 tcg_gen_shri_i32(tmp2, tmp2, 16);
2718 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2719 break;
2720 default:
7d1b0095
PM
2721 tcg_temp_free_i32(tmp2);
2722 tcg_temp_free_i32(tmp);
18c9b560
AZ
2723 return 1;
2724 }
7d1b0095
PM
2725 tcg_temp_free_i32(tmp2);
2726 tcg_temp_free_i32(tmp);
18c9b560
AZ
2727 gen_op_iwmmxt_movq_wRn_M0(wrd);
2728 gen_op_iwmmxt_set_mup();
2729 break;
2730 default:
2731 return 1;
2732 }
2733
2734 return 0;
2735}
2736
a1c7273b 2737/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2738 (ie. an undefined instruction). */
7dcc1f89 2739static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2740{
2741 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2742 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2743
2744 if ((insn & 0x0ff00f10) == 0x0e200010) {
2745 /* Multiply with Internal Accumulate Format */
2746 rd0 = (insn >> 12) & 0xf;
2747 rd1 = insn & 0xf;
2748 acc = (insn >> 5) & 7;
2749
2750 if (acc != 0)
2751 return 1;
2752
3a554c0f
FN
2753 tmp = load_reg(s, rd0);
2754 tmp2 = load_reg(s, rd1);
18c9b560 2755 switch ((insn >> 16) & 0xf) {
d00584b7 2756 case 0x0: /* MIA */
3a554c0f 2757 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2758 break;
d00584b7 2759 case 0x8: /* MIAPH */
3a554c0f 2760 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2761 break;
d00584b7
PM
2762 case 0xc: /* MIABB */
2763 case 0xd: /* MIABT */
2764 case 0xe: /* MIATB */
2765 case 0xf: /* MIATT */
18c9b560 2766 if (insn & (1 << 16))
3a554c0f 2767 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2768 if (insn & (1 << 17))
3a554c0f
FN
2769 tcg_gen_shri_i32(tmp2, tmp2, 16);
2770 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2771 break;
2772 default:
2773 return 1;
2774 }
7d1b0095
PM
2775 tcg_temp_free_i32(tmp2);
2776 tcg_temp_free_i32(tmp);
18c9b560
AZ
2777
2778 gen_op_iwmmxt_movq_wRn_M0(acc);
2779 return 0;
2780 }
2781
2782 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2783 /* Internal Accumulator Access Format */
2784 rdhi = (insn >> 16) & 0xf;
2785 rdlo = (insn >> 12) & 0xf;
2786 acc = insn & 7;
2787
2788 if (acc != 0)
2789 return 1;
2790
d00584b7 2791 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2792 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2793 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
664b7e3b 2794 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2795 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
d00584b7 2796 } else { /* MAR */
3a554c0f
FN
2797 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2798 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2799 }
2800 return 0;
2801 }
2802
2803 return 1;
2804}
2805
/* Extract a VFP single-precision register number from an instruction:
 * the 4-bit field at 'bigbit' supplies bits [4:1] of the register number
 * and the single bit at 'smallbit' supplies bit [0].  VFP_REG_SHR exists
 * so that VFP_SREG can "shift right by bigbit - 1" even when that count
 * would be negative (it becomes a left shift instead).
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a VFP double-precision register number into 'reg': on VFP3 and
 * later the extra bit at 'smallbit' extends the range to D0..D31; on
 * earlier cores that bit must be zero, otherwise the insn is UNDEF.
 * NOTE: expands to a 'return 1' on the UNDEF case, so this macro is only
 * usable inside the disas_* functions that follow the "return nonzero
 * for undefined instruction" convention, and it relies on a DisasContext
 * named 's' being in scope.
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Convenience accessors for the D (dest), N and M (source) register
 * fields at their standard bit positions in the VFP encodings.
 */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2825
39d5492a 2826static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2827{
39d5492a 2828 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2829 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2830 tcg_gen_shli_i32(tmp, var, 16);
2831 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2832 tcg_temp_free_i32(tmp);
ad69471c
PB
2833}
2834
39d5492a 2835static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2836{
39d5492a 2837 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2838 tcg_gen_andi_i32(var, var, 0xffff0000);
2839 tcg_gen_shri_i32(tmp, var, 16);
2840 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2841 tcg_temp_free_i32(tmp);
ad69471c
PB
2842}
2843
06db8196
PM
2844/*
2845 * Disassemble a VFP instruction. Returns nonzero if an error occurred
2846 * (ie. an undefined instruction).
2847 */
7dcc1f89 2848static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95 2849{
d614a513 2850 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 2851 return 1;
d614a513 2852 }
40f137e1 2853
78e138bc
PM
2854 /*
2855 * If the decodetree decoder handles this insn it will always
2856 * emit code to either execute the insn or generate an appropriate
2857 * exception; so we don't need to ever return non-zero to tell
2858 * the calling code to emit an UNDEF exception.
2859 */
2860 if (extract32(insn, 28, 4) == 0xf) {
2861 if (disas_vfp_uncond(s, insn)) {
2862 return 0;
2863 }
2864 } else {
2865 if (disas_vfp(s, insn)) {
2866 return 0;
2867 }
2868 }
3111bfc2
PM
2869 /* If the decodetree decoder didn't handle this insn, it must be UNDEF */
2870 return 1;
b7bcbe95
FB
2871}
2872
90aa39a1 2873static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 2874{
90aa39a1 2875#ifndef CONFIG_USER_ONLY
dcba3a8d 2876 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
a0415916 2877 ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
90aa39a1
SF
2878#else
2879 return true;
2880#endif
2881}
6e256c93 2882
/* Emit an indirect jump through the TB lookup-and-goto-ptr helper,
 * used when direct TB chaining (goto_tb) is not possible.
 */
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}
2887
4cae8f56
AB
2888/* This will end the TB but doesn't guarantee we'll return to
2889 * cpu_loop_exec. Any live exit_requests will be processed as we
2890 * enter the next TB.
2891 */
8a6b28c7 2892static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
90aa39a1
SF
2893{
2894 if (use_goto_tb(s, dest)) {
57fec1fe 2895 tcg_gen_goto_tb(n);
eaed129d 2896 gen_set_pc_im(s, dest);
07ea28b4 2897 tcg_gen_exit_tb(s->base.tb, n);
6e256c93 2898 } else {
eaed129d 2899 gen_set_pc_im(s, dest);
8a6b28c7 2900 gen_goto_ptr();
6e256c93 2901 }
dcba3a8d 2902 s->base.is_jmp = DISAS_NORETURN;
c53be334
FB
2903}
2904
8aaca4c0
FB
2905static inline void gen_jmp (DisasContext *s, uint32_t dest)
2906{
b636649f 2907 if (unlikely(is_singlestepping(s))) {
8aaca4c0 2908 /* An indirect jump so that we still trigger the debug exception. */
5899f386 2909 if (s->thumb)
d9ba4830
PB
2910 dest |= 1;
2911 gen_bx_im(s, dest);
8aaca4c0 2912 } else {
6e256c93 2913 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
2914 }
2915}
2916
39d5492a 2917static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 2918{
ee097184 2919 if (x)
d9ba4830 2920 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 2921 else
d9ba4830 2922 gen_sxth(t0);
ee097184 2923 if (y)
d9ba4830 2924 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 2925 else
d9ba4830
PB
2926 gen_sxth(t1);
2927 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
2928}
2929
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    /* Each of the four 'flags' bits selects one byte of the PSR. */
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    /* Strip bits that do not exist in the emulated architecture version. */
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality*/
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
2968
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write the SPSR under 'mask'. */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR/SPSR writes can change privilege etc, so re-lookup the TB. */
    gen_lookup_tb(s);
    return 0;
}
2990
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp); /* gen_set_psr consumes tmp */
}
2999
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /*
         * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
         * (and so we can forbid accesses from EL2 or below). elr_hyp
         * can be accessed also from Hyp mode, so forbid accesses from
         * EL0 or EL1.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
            (s->current_el < 3 && *regno != 17)) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                       syn_uncategorized(), exc_target);
    return false;
}
3149
/* Emit code for MSR (banked): write general register rn to the banked
 * register selected by (r, sysm), via the msr_banked helper.
 */
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}
3171
/* Emit code for MRS (banked): read the banked register selected by
 * (r, sysm) into general register rn, via the mrs_banked helper.
 */
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}
3193
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc); /* pc is dead after this call */
}
3203
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}
3b46e624 3220
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    /* Restore CPSR from the current mode's SPSR. */
    gen_rfe(s, pc, load_cpu_field(spsr));
}
3226
/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->base.pc_next);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->base.pc_next);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->base.pc_next);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
99c475ab 3266
/* Shorthand for the common "op0 = op0 OP op1" 64-bit operand triple. */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

/* Element-wise Neon add, t0 += t1, for element size 'size' (log2 bytes). */
static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}
3278
/* Element-wise Neon reverse subtract, t0 = t1 - t0, for size 'size'. */
static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return; /* silently ignore invalid sizes (unlike gen_neon_add) */
    }
}
3288
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  tcg_gen_smax_i32
#define gen_helper_neon_pmax_u32  tcg_gen_umax_i32
#define gen_helper_neon_pmin_s32  tcg_gen_smin_i32
#define gen_helper_neon_pmin_u32  tcg_gen_umin_i32

/* Dispatch gen_helper_neon_<name>_{s8,u8,s16,u16,s32,u32}(tmp, cpu_env,
 * tmp, tmp2) on (size, u).  Expands inside decode code where 'size', 'u',
 * 'tmp' and 'tmp2' are in scope; 'return 1' flags an invalid encoding.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV but for helpers not taking cpu_env. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3340
/* Load Neon scratch slot 'scratch' from CPU state into a new temp. */
static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}
3347
/* Store var into Neon scratch slot 'scratch'; marks var as dead. */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
3353
/* Fetch the scalar operand for a by-scalar Neon op.  For 16-bit
 * elements the selected halfword is duplicated into both halves of
 * the returned 32-bit temp; 'reg' encodes both register and lane.
 */
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
3369
/* Emit a VUZP (unzip) of registers rd/rm.  Returns 1 for the
 * invalid encoding (doubleword 32-bit case), 0 on success.
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        /* quadword forms */
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qunzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qunzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        /* doubleword forms */
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_unzip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}
3409
/* Emit a VZIP (zip/interleave) of registers rd/rm.  Returns 1 for the
 * invalid encoding (doubleword 32-bit case), 0 on success.
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        /* quadword forms */
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        /* doubleword forms */
        switch (size) {
        case 0:
            gen_helper_neon_zip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_zip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}
3449
/* VTRN for 8-bit elements: transpose the odd bytes of t0 with the
 * even bytes of t1, operating on a 32-bit word (two byte pairs).
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = (t0 << 8) in odd byte lanes | even byte lanes of t1 */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = (t1 >> 8) in even byte lanes | odd byte lanes of t0 */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
3471
/* VTRN for 16-bit elements: swap the high halfword of t0 with the
 * low halfword of t1 within a 32-bit word.
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = t0.low << 16 | t1.low */
    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    /* t1 = t0.high | t1.high >> 16 */
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
3490
3491
/* Layout table for the VLD/VST "all elements" forms, indexed by the op
 * field (0..10): registers per op, structure interleave factor, and
 * register spacing used by disas_neon_ls_insn() below.
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} const neon_ls_element_type[11] = {
    {1, 4, 1},
    {1, 4, 2},
    {4, 1, 1},
    {2, 2, 2},
    {1, 3, 1},
    {1, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {1, 2, 1},
    {1, 2, 2},
    {2, 1, 1}
};
3509
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int load;
    int n;
    int vec_size;
    int mmu_idx;
    TCGMemOp endian;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    endian = s->be_data;
    mmu_idx = get_mem_index(s);
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1) {
            return 1;
        }
        /* For our purposes, bytes are always little-endian.  */
        if (size == 0) {
            endian = MO_LE;
        }
        /* Consecutive little-endian elements from a single register
         * can be promoted to a larger little-endian operation.
         */
        if (interleave == 1 && endian == MO_LE) {
            size = 3;
        }
        tmp64 = tcg_temp_new_i64();
        addr = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(1 << size);
        load_reg_var(s, addr, rn);
        /* Walk registers x elements x interleaved structure members,
         * issuing one memory access of 1 << size bytes per step.
         */
        for (reg = 0; reg < nregs; reg++) {
            for (n = 0; n < 8 >> size; n++) {
                int xs;
                for (xs = 0; xs < interleave; xs++) {
                    int tt = rd + reg + spacing * xs;

                    if (load) {
                        gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
                        neon_store_element64(tt, n, size, tmp64);
                    } else {
                        neon_load_element64(tmp64, tt, n, size);
                        gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
                    }
                    tcg_gen_add_i32(addr, addr, tmp2);
                }
            }
        }
        tcg_temp_free_i32(addr);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i64(tmp64);
        stride = nregs * interleave * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);

            /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
             * VLD2/3/4 to all lanes: bit 5 indicates register stride.
             */
            stride = (insn & (1 << 5)) ? 2 : 1;
            vec_size = nregs == 1 ? stride * 8 : 8;

            tmp = tcg_temp_new_i32();
            for (reg = 0; reg < nregs; reg++) {
                gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                                s->be_data | size);
                if ((rd & 1) && vec_size == 16) {
                    /* We cannot write 16 bytes at once because the
                     * destination is unaligned.
                     */
                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
                                         8, 8, tmp);
                    tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
                                     neon_reg_offset(rd, 0), 8, 8);
                } else {
                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
                                         vec_size, vec_size, tmp);
                }
                tcg_gen_addi_i32(addr, addr, 1 << size);
                rd += stride;
            }
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            int reg_idx;
            switch (size) {
            case 0:
                reg_idx = (insn >> 5) & 7;
                stride = 1;
                break;
            case 1:
                reg_idx = (insn >> 6) & 3;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                reg_idx = (insn >> 7) & 1;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            tmp = tcg_temp_new_i32();
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                    neon_store_element(rd, reg_idx, size, tmp);
                } else { /* Store */
                    neon_load_element(tmp, rd, reg_idx, size);
                    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            tcg_temp_free_i32(tmp);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 none, rm == 13 immediate
     * (transfer size), otherwise register post-index.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 3758
/* Narrow a 64-bit vector element pair to 32 bits (plain truncation). */
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
}
3768
/* Signed saturating narrow: 64-bit src to 32-bit dest (may set QC). */
static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}
3778
/* Unsigned saturating narrow: 64-bit src to 32-bit dest (may set QC). */
static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}
3788
/* Signed-to-unsigned saturating narrow (VQMOVUN style helpers). */
static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
3798
/* Variable shift used on the narrowing-shift path: 'q' selects the
 * rounding shift helpers, 'u' selects unsigned vs signed.
 */
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
3832
/* Widen a 32-bit element group to 64 bits, zero- (u) or sign-extending
 * each element; marks src as dead.
 */
static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
3852
/* Long add on the implicit cpu_V0/cpu_V1 operands (see CPU_V001). */
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}
3862
/* Long subtract on the implicit cpu_V0/cpu_V1 operands (see CPU_V001). */
static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}
3872
/* Negate the widened elements held in the 64-bit temp 'var'. */
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}
3884
/* Saturating long add, op0 = sat(op0 + op1); helpers may set QC. */
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
3893
/* Widening multiply, dest(64) = a(32) * b(32), with signedness 'u' and
 * element size 'size'; frees a and b in every case.
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        /* 32x32->64: gen_muls/mulu free their 32-bit inputs. */
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
3924
/* Dispatch the four narrowing variants: op selects unarrow/plain-narrow
 * vs the saturating forms, u selects unsigned vs signed saturation.
 */
static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
3942
62698be3
PM
3943/* Symbolic constants for op fields for Neon 3-register same-length.
3944 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
3945 * table A7-9.
3946 */
3947#define NEON_3R_VHADD 0
3948#define NEON_3R_VQADD 1
3949#define NEON_3R_VRHADD 2
3950#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
3951#define NEON_3R_VHSUB 4
3952#define NEON_3R_VQSUB 5
3953#define NEON_3R_VCGT 6
3954#define NEON_3R_VCGE 7
3955#define NEON_3R_VSHL 8
3956#define NEON_3R_VQSHL 9
3957#define NEON_3R_VRSHL 10
3958#define NEON_3R_VQRSHL 11
3959#define NEON_3R_VMAX 12
3960#define NEON_3R_VMIN 13
3961#define NEON_3R_VABD 14
3962#define NEON_3R_VABA 15
3963#define NEON_3R_VADD_VSUB 16
3964#define NEON_3R_VTST_VCEQ 17
4a7832b0 3965#define NEON_3R_VML 18 /* VMLA, VMLS */
62698be3
PM
3966#define NEON_3R_VMUL 19
3967#define NEON_3R_VPMAX 20
3968#define NEON_3R_VPMIN 21
3969#define NEON_3R_VQDMULH_VQRDMULH 22
36a71934 3970#define NEON_3R_VPADD_VQRDMLAH 23
f1ecb913 3971#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
36a71934 3972#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
62698be3
PM
3973#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
3974#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
3975#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
3976#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
3977#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
505935fc 3978#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
62698be3
PM
3979
3980static const uint8_t neon_3r_sizes[] = {
3981 [NEON_3R_VHADD] = 0x7,
3982 [NEON_3R_VQADD] = 0xf,
3983 [NEON_3R_VRHADD] = 0x7,
3984 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
3985 [NEON_3R_VHSUB] = 0x7,
3986 [NEON_3R_VQSUB] = 0xf,
3987 [NEON_3R_VCGT] = 0x7,
3988 [NEON_3R_VCGE] = 0x7,
3989 [NEON_3R_VSHL] = 0xf,
3990 [NEON_3R_VQSHL] = 0xf,
3991 [NEON_3R_VRSHL] = 0xf,
3992 [NEON_3R_VQRSHL] = 0xf,
3993 [NEON_3R_VMAX] = 0x7,
3994 [NEON_3R_VMIN] = 0x7,
3995 [NEON_3R_VABD] = 0x7,
3996 [NEON_3R_VABA] = 0x7,
3997 [NEON_3R_VADD_VSUB] = 0xf,
3998 [NEON_3R_VTST_VCEQ] = 0x7,
3999 [NEON_3R_VML] = 0x7,
4000 [NEON_3R_VMUL] = 0x7,
4001 [NEON_3R_VPMAX] = 0x7,
4002 [NEON_3R_VPMIN] = 0x7,
4003 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
36a71934 4004 [NEON_3R_VPADD_VQRDMLAH] = 0x7,
f1ecb913 4005 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
36a71934 4006 [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
62698be3
PM
4007 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4008 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4009 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4010 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4011 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
505935fc 4012 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
62698be3
PM
4013};
4014
/*
 * Symbolic constants for the op field of Neon 2-register miscellaneous
 * instructions.  The values correspond to bits [17:16,10:7]; see the
 * ARM ARM DDI0406B table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
fe8fcf3d
PM
4082static bool neon_2rm_is_v8_op(int op)
4083{
4084 /* Return true if this neon 2reg-misc op is ARMv8 and up */
4085 switch (op) {
4086 case NEON_2RM_VRINTN:
4087 case NEON_2RM_VRINTA:
4088 case NEON_2RM_VRINTM:
4089 case NEON_2RM_VRINTP:
4090 case NEON_2RM_VRINTZ:
4091 case NEON_2RM_VRINTX:
4092 case NEON_2RM_VCVTAU:
4093 case NEON_2RM_VCVTAS:
4094 case NEON_2RM_VCVTNU:
4095 case NEON_2RM_VCVTNS:
4096 case NEON_2RM_VCVTPU:
4097 case NEON_2RM_VCVTPS:
4098 case NEON_2RM_VCVTMU:
4099 case NEON_2RM_VCVTMS:
4100 return true;
4101 default:
4102 return false;
4103 }
4104}
4105
600b828c
PM
4106/* Each entry in this array has bit n set if the insn allows
4107 * size value n (otherwise it will UNDEF). Since unallocated
4108 * op values will have no bits set they always UNDEF.
4109 */
4110static const uint8_t neon_2rm_sizes[] = {
4111 [NEON_2RM_VREV64] = 0x7,
4112 [NEON_2RM_VREV32] = 0x3,
4113 [NEON_2RM_VREV16] = 0x1,
4114 [NEON_2RM_VPADDL] = 0x7,
4115 [NEON_2RM_VPADDL_U] = 0x7,
9d935509
AB
4116 [NEON_2RM_AESE] = 0x1,
4117 [NEON_2RM_AESMC] = 0x1,
600b828c
PM
4118 [NEON_2RM_VCLS] = 0x7,
4119 [NEON_2RM_VCLZ] = 0x7,
4120 [NEON_2RM_VCNT] = 0x1,
4121 [NEON_2RM_VMVN] = 0x1,
4122 [NEON_2RM_VPADAL] = 0x7,
4123 [NEON_2RM_VPADAL_U] = 0x7,
4124 [NEON_2RM_VQABS] = 0x7,
4125 [NEON_2RM_VQNEG] = 0x7,
4126 [NEON_2RM_VCGT0] = 0x7,
4127 [NEON_2RM_VCGE0] = 0x7,
4128 [NEON_2RM_VCEQ0] = 0x7,
4129 [NEON_2RM_VCLE0] = 0x7,
4130 [NEON_2RM_VCLT0] = 0x7,
f1ecb913 4131 [NEON_2RM_SHA1H] = 0x4,
600b828c
PM
4132 [NEON_2RM_VABS] = 0x7,
4133 [NEON_2RM_VNEG] = 0x7,
4134 [NEON_2RM_VCGT0_F] = 0x4,
4135 [NEON_2RM_VCGE0_F] = 0x4,
4136 [NEON_2RM_VCEQ0_F] = 0x4,
4137 [NEON_2RM_VCLE0_F] = 0x4,
4138 [NEON_2RM_VCLT0_F] = 0x4,
4139 [NEON_2RM_VABS_F] = 0x4,
4140 [NEON_2RM_VNEG_F] = 0x4,
4141 [NEON_2RM_VSWP] = 0x1,
4142 [NEON_2RM_VTRN] = 0x7,
4143 [NEON_2RM_VUZP] = 0x7,
4144 [NEON_2RM_VZIP] = 0x7,
4145 [NEON_2RM_VMOVN] = 0x7,
4146 [NEON_2RM_VQMOVN] = 0x7,
4147 [NEON_2RM_VSHLL] = 0x7,
f1ecb913 4148 [NEON_2RM_SHA1SU1] = 0x4,
34f7b0a2 4149 [NEON_2RM_VRINTN] = 0x4,
2ce70625 4150 [NEON_2RM_VRINTX] = 0x4,
34f7b0a2
WN
4151 [NEON_2RM_VRINTA] = 0x4,
4152 [NEON_2RM_VRINTZ] = 0x4,
600b828c 4153 [NEON_2RM_VCVT_F16_F32] = 0x2,
34f7b0a2 4154 [NEON_2RM_VRINTM] = 0x4,
600b828c 4155 [NEON_2RM_VCVT_F32_F16] = 0x2,
34f7b0a2 4156 [NEON_2RM_VRINTP] = 0x4,
901ad525
WN
4157 [NEON_2RM_VCVTAU] = 0x4,
4158 [NEON_2RM_VCVTAS] = 0x4,
4159 [NEON_2RM_VCVTNU] = 0x4,
4160 [NEON_2RM_VCVTNS] = 0x4,
4161 [NEON_2RM_VCVTPU] = 0x4,
4162 [NEON_2RM_VCVTPS] = 0x4,
4163 [NEON_2RM_VCVTMU] = 0x4,
4164 [NEON_2RM_VCVTMS] = 0x4,
600b828c
PM
4165 [NEON_2RM_VRECPE] = 0x4,
4166 [NEON_2RM_VRSQRTE] = 0x4,
4167 [NEON_2RM_VRECPE_F] = 0x4,
4168 [NEON_2RM_VRSQRTE_F] = 0x4,
4169 [NEON_2RM_VCVT_FS] = 0x4,
4170 [NEON_2RM_VCVT_FU] = 0x4,
4171 [NEON_2RM_VCVT_SF] = 0x4,
4172 [NEON_2RM_VCVT_UF] = 0x4,
4173};
4174
36a71934
RH
4175
4176/* Expand v8.1 simd helper. */
4177static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
4178 int q, int rd, int rn, int rm)
4179{
962fcbf2 4180 if (dc_isar_feature(aa32_rdm, s)) {
36a71934
RH
4181 int opr_sz = (1 + q) * 8;
4182 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
4183 vfp_reg_offset(1, rn),
4184 vfp_reg_offset(1, rm), cpu_env,
4185 opr_sz, opr_sz, 0, fn);
4186 return 0;
4187 }
4188 return 1;
4189}
4190
41f6c113
RH
4191static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4192{
4193 tcg_gen_vec_sar8i_i64(a, a, shift);
4194 tcg_gen_vec_add8_i64(d, d, a);
4195}
4196
4197static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4198{
4199 tcg_gen_vec_sar16i_i64(a, a, shift);
4200 tcg_gen_vec_add16_i64(d, d, a);
4201}
4202
4203static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4204{
4205 tcg_gen_sari_i32(a, a, shift);
4206 tcg_gen_add_i32(d, d, a);
4207}
4208
4209static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4210{
4211 tcg_gen_sari_i64(a, a, shift);
4212 tcg_gen_add_i64(d, d, a);
4213}
4214
4215static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4216{
4217 tcg_gen_sari_vec(vece, a, a, sh);
4218 tcg_gen_add_vec(vece, d, d, a);
4219}
4220
53229a77
RH
4221static const TCGOpcode vecop_list_ssra[] = {
4222 INDEX_op_sari_vec, INDEX_op_add_vec, 0
4223};
4224
41f6c113
RH
4225const GVecGen2i ssra_op[4] = {
4226 { .fni8 = gen_ssra8_i64,
4227 .fniv = gen_ssra_vec,
4228 .load_dest = true,
53229a77 4229 .opt_opc = vecop_list_ssra,
41f6c113
RH
4230 .vece = MO_8 },
4231 { .fni8 = gen_ssra16_i64,
4232 .fniv = gen_ssra_vec,
4233 .load_dest = true,
53229a77 4234 .opt_opc = vecop_list_ssra,
41f6c113
RH
4235 .vece = MO_16 },
4236 { .fni4 = gen_ssra32_i32,
4237 .fniv = gen_ssra_vec,
4238 .load_dest = true,
53229a77 4239 .opt_opc = vecop_list_ssra,
41f6c113
RH
4240 .vece = MO_32 },
4241 { .fni8 = gen_ssra64_i64,
4242 .fniv = gen_ssra_vec,
4243 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
53229a77 4244 .opt_opc = vecop_list_ssra,
41f6c113 4245 .load_dest = true,
41f6c113
RH
4246 .vece = MO_64 },
4247};
4248
4249static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4250{
4251 tcg_gen_vec_shr8i_i64(a, a, shift);
4252 tcg_gen_vec_add8_i64(d, d, a);
4253}
4254
4255static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4256{
4257 tcg_gen_vec_shr16i_i64(a, a, shift);
4258 tcg_gen_vec_add16_i64(d, d, a);
4259}
4260
4261static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4262{
4263 tcg_gen_shri_i32(a, a, shift);
4264 tcg_gen_add_i32(d, d, a);
4265}
4266
4267static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4268{
4269 tcg_gen_shri_i64(a, a, shift);
4270 tcg_gen_add_i64(d, d, a);
4271}
4272
4273static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4274{
4275 tcg_gen_shri_vec(vece, a, a, sh);
4276 tcg_gen_add_vec(vece, d, d, a);
4277}
4278
53229a77
RH
4279static const TCGOpcode vecop_list_usra[] = {
4280 INDEX_op_shri_vec, INDEX_op_add_vec, 0
4281};
4282
41f6c113
RH
4283const GVecGen2i usra_op[4] = {
4284 { .fni8 = gen_usra8_i64,
4285 .fniv = gen_usra_vec,
4286 .load_dest = true,
53229a77 4287 .opt_opc = vecop_list_usra,
41f6c113
RH
4288 .vece = MO_8, },
4289 { .fni8 = gen_usra16_i64,
4290 .fniv = gen_usra_vec,
4291 .load_dest = true,
53229a77 4292 .opt_opc = vecop_list_usra,
41f6c113
RH
4293 .vece = MO_16, },
4294 { .fni4 = gen_usra32_i32,
4295 .fniv = gen_usra_vec,
4296 .load_dest = true,
53229a77 4297 .opt_opc = vecop_list_usra,
41f6c113
RH
4298 .vece = MO_32, },
4299 { .fni8 = gen_usra64_i64,
4300 .fniv = gen_usra_vec,
4301 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4302 .load_dest = true,
53229a77 4303 .opt_opc = vecop_list_usra,
41f6c113
RH
4304 .vece = MO_64, },
4305};
eabcd6fa 4306
f3cd8218
RH
4307static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4308{
4309 uint64_t mask = dup_const(MO_8, 0xff >> shift);
4310 TCGv_i64 t = tcg_temp_new_i64();
4311
4312 tcg_gen_shri_i64(t, a, shift);
4313 tcg_gen_andi_i64(t, t, mask);
4314 tcg_gen_andi_i64(d, d, ~mask);
4315 tcg_gen_or_i64(d, d, t);
4316 tcg_temp_free_i64(t);
4317}
4318
4319static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4320{
4321 uint64_t mask = dup_const(MO_16, 0xffff >> shift);
4322 TCGv_i64 t = tcg_temp_new_i64();
4323
4324 tcg_gen_shri_i64(t, a, shift);
4325 tcg_gen_andi_i64(t, t, mask);
4326 tcg_gen_andi_i64(d, d, ~mask);
4327 tcg_gen_or_i64(d, d, t);
4328 tcg_temp_free_i64(t);
4329}
4330
4331static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4332{
4333 tcg_gen_shri_i32(a, a, shift);
4334 tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
4335}
4336
4337static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4338{
4339 tcg_gen_shri_i64(a, a, shift);
4340 tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
4341}
4342
4343static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4344{
4345 if (sh == 0) {
4346 tcg_gen_mov_vec(d, a);
4347 } else {
4348 TCGv_vec t = tcg_temp_new_vec_matching(d);
4349 TCGv_vec m = tcg_temp_new_vec_matching(d);
4350
4351 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
4352 tcg_gen_shri_vec(vece, t, a, sh);
4353 tcg_gen_and_vec(vece, d, d, m);
4354 tcg_gen_or_vec(vece, d, d, t);
4355
4356 tcg_temp_free_vec(t);
4357 tcg_temp_free_vec(m);
4358 }
4359}
4360
53229a77
RH
4361static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };
4362
f3cd8218
RH
4363const GVecGen2i sri_op[4] = {
4364 { .fni8 = gen_shr8_ins_i64,
4365 .fniv = gen_shr_ins_vec,
4366 .load_dest = true,
53229a77 4367 .opt_opc = vecop_list_sri,
f3cd8218
RH
4368 .vece = MO_8 },
4369 { .fni8 = gen_shr16_ins_i64,
4370 .fniv = gen_shr_ins_vec,
4371 .load_dest = true,
53229a77 4372 .opt_opc = vecop_list_sri,
f3cd8218
RH
4373 .vece = MO_16 },
4374 { .fni4 = gen_shr32_ins_i32,
4375 .fniv = gen_shr_ins_vec,
4376 .load_dest = true,
53229a77 4377 .opt_opc = vecop_list_sri,
f3cd8218
RH
4378 .vece = MO_32 },
4379 { .fni8 = gen_shr64_ins_i64,
4380 .fniv = gen_shr_ins_vec,
4381 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4382 .load_dest = true,
53229a77 4383 .opt_opc = vecop_list_sri,
f3cd8218
RH
4384 .vece = MO_64 },
4385};
4386
4387static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4388{
4389 uint64_t mask = dup_const(MO_8, 0xff << shift);
4390 TCGv_i64 t = tcg_temp_new_i64();
4391
4392 tcg_gen_shli_i64(t, a, shift);
4393 tcg_gen_andi_i64(t, t, mask);
4394 tcg_gen_andi_i64(d, d, ~mask);
4395 tcg_gen_or_i64(d, d, t);
4396 tcg_temp_free_i64(t);
4397}
4398
4399static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4400{
4401 uint64_t mask = dup_const(MO_16, 0xffff << shift);
4402 TCGv_i64 t = tcg_temp_new_i64();
4403
4404 tcg_gen_shli_i64(t, a, shift);
4405 tcg_gen_andi_i64(t, t, mask);
4406 tcg_gen_andi_i64(d, d, ~mask);
4407 tcg_gen_or_i64(d, d, t);
4408 tcg_temp_free_i64(t);
4409}
4410
4411static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4412{
4413 tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
4414}
4415
4416static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4417{
4418 tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
4419}
4420
4421static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4422{
4423 if (sh == 0) {
4424 tcg_gen_mov_vec(d, a);
4425 } else {
4426 TCGv_vec t = tcg_temp_new_vec_matching(d);
4427 TCGv_vec m = tcg_temp_new_vec_matching(d);
4428
4429 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
4430 tcg_gen_shli_vec(vece, t, a, sh);
4431 tcg_gen_and_vec(vece, d, d, m);
4432 tcg_gen_or_vec(vece, d, d, t);
4433
4434 tcg_temp_free_vec(t);
4435 tcg_temp_free_vec(m);
4436 }
4437}
4438
53229a77
RH
4439static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };
4440
f3cd8218
RH
4441const GVecGen2i sli_op[4] = {
4442 { .fni8 = gen_shl8_ins_i64,
4443 .fniv = gen_shl_ins_vec,
4444 .load_dest = true,
53229a77 4445 .opt_opc = vecop_list_sli,
f3cd8218
RH
4446 .vece = MO_8 },
4447 { .fni8 = gen_shl16_ins_i64,
4448 .fniv = gen_shl_ins_vec,
4449 .load_dest = true,
53229a77 4450 .opt_opc = vecop_list_sli,
f3cd8218
RH
4451 .vece = MO_16 },
4452 { .fni4 = gen_shl32_ins_i32,
4453 .fniv = gen_shl_ins_vec,
4454 .load_dest = true,
53229a77 4455 .opt_opc = vecop_list_sli,
f3cd8218
RH
4456 .vece = MO_32 },
4457 { .fni8 = gen_shl64_ins_i64,
4458 .fniv = gen_shl_ins_vec,
4459 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4460 .load_dest = true,
53229a77 4461 .opt_opc = vecop_list_sli,
f3cd8218
RH
4462 .vece = MO_64 },
4463};
4464
4a7832b0
RH
4465static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4466{
4467 gen_helper_neon_mul_u8(a, a, b);
4468 gen_helper_neon_add_u8(d, d, a);
4469}
4470
4471static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4472{
4473 gen_helper_neon_mul_u8(a, a, b);
4474 gen_helper_neon_sub_u8(d, d, a);
4475}
4476
4477static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4478{
4479 gen_helper_neon_mul_u16(a, a, b);
4480 gen_helper_neon_add_u16(d, d, a);
4481}
4482
4483static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4484{
4485 gen_helper_neon_mul_u16(a, a, b);
4486 gen_helper_neon_sub_u16(d, d, a);
4487}
4488
4489static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4490{
4491 tcg_gen_mul_i32(a, a, b);
4492 tcg_gen_add_i32(d, d, a);
4493}
4494
4495static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4496{
4497 tcg_gen_mul_i32(a, a, b);
4498 tcg_gen_sub_i32(d, d, a);
4499}
4500
4501static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4502{
4503 tcg_gen_mul_i64(a, a, b);
4504 tcg_gen_add_i64(d, d, a);
4505}
4506
4507static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4508{
4509 tcg_gen_mul_i64(a, a, b);
4510 tcg_gen_sub_i64(d, d, a);
4511}
4512
4513static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4514{
4515 tcg_gen_mul_vec(vece, a, a, b);
4516 tcg_gen_add_vec(vece, d, d, a);
4517}
4518
4519static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4520{
4521 tcg_gen_mul_vec(vece, a, a, b);
4522 tcg_gen_sub_vec(vece, d, d, a);
4523}
4524
4525/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
4526 * these tables are shared with AArch64 which does support them.
4527 */
53229a77
RH
4528
4529static const TCGOpcode vecop_list_mla[] = {
4530 INDEX_op_mul_vec, INDEX_op_add_vec, 0
4531};
4532
4533static const TCGOpcode vecop_list_mls[] = {
4534 INDEX_op_mul_vec, INDEX_op_sub_vec, 0
4535};
4536
4a7832b0
RH
4537const GVecGen3 mla_op[4] = {
4538 { .fni4 = gen_mla8_i32,
4539 .fniv = gen_mla_vec,
4a7832b0 4540 .load_dest = true,
53229a77 4541 .opt_opc = vecop_list_mla,
4a7832b0
RH
4542 .vece = MO_8 },
4543 { .fni4 = gen_mla16_i32,
4544 .fniv = gen_mla_vec,
4a7832b0 4545 .load_dest = true,
53229a77 4546 .opt_opc = vecop_list_mla,
4a7832b0
RH
4547 .vece = MO_16 },
4548 { .fni4 = gen_mla32_i32,
4549 .fniv = gen_mla_vec,
4a7832b0 4550 .load_dest = true,
53229a77 4551 .opt_opc = vecop_list_mla,
4a7832b0
RH
4552 .vece = MO_32 },
4553 { .fni8 = gen_mla64_i64,
4554 .fniv = gen_mla_vec,
4a7832b0
RH
4555 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4556 .load_dest = true,
53229a77 4557 .opt_opc = vecop_list_mla,
4a7832b0
RH
4558 .vece = MO_64 },
4559};
4560
4561const GVecGen3 mls_op[4] = {
4562 { .fni4 = gen_mls8_i32,
4563 .fniv = gen_mls_vec,
4a7832b0 4564 .load_dest = true,
53229a77 4565 .opt_opc = vecop_list_mls,
4a7832b0
RH
4566 .vece = MO_8 },
4567 { .fni4 = gen_mls16_i32,
4568 .fniv = gen_mls_vec,
4a7832b0 4569 .load_dest = true,
53229a77 4570 .opt_opc = vecop_list_mls,
4a7832b0
RH
4571 .vece = MO_16 },
4572 { .fni4 = gen_mls32_i32,
4573 .fniv = gen_mls_vec,
4a7832b0 4574 .load_dest = true,
53229a77 4575 .opt_opc = vecop_list_mls,
4a7832b0
RH
4576 .vece = MO_32 },
4577 { .fni8 = gen_mls64_i64,
4578 .fniv = gen_mls_vec,
4a7832b0
RH
4579 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4580 .load_dest = true,
53229a77 4581 .opt_opc = vecop_list_mls,
4a7832b0
RH
4582 .vece = MO_64 },
4583};
4584
ea580fa3
RH
4585/* CMTST : test is "if (X & Y != 0)". */
4586static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4587{
4588 tcg_gen_and_i32(d, a, b);
4589 tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
4590 tcg_gen_neg_i32(d, d);
4591}
4592
4593void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4594{
4595 tcg_gen_and_i64(d, a, b);
4596 tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
4597 tcg_gen_neg_i64(d, d);
4598}
4599
4600static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4601{
4602 tcg_gen_and_vec(vece, d, a, b);
4603 tcg_gen_dupi_vec(vece, a, 0);
4604 tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
4605}
4606
53229a77
RH
4607static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };
4608
ea580fa3
RH
4609const GVecGen3 cmtst_op[4] = {
4610 { .fni4 = gen_helper_neon_tst_u8,
4611 .fniv = gen_cmtst_vec,
53229a77 4612 .opt_opc = vecop_list_cmtst,
ea580fa3
RH
4613 .vece = MO_8 },
4614 { .fni4 = gen_helper_neon_tst_u16,
4615 .fniv = gen_cmtst_vec,
53229a77 4616 .opt_opc = vecop_list_cmtst,
ea580fa3
RH
4617 .vece = MO_16 },
4618 { .fni4 = gen_cmtst_i32,
4619 .fniv = gen_cmtst_vec,
53229a77 4620 .opt_opc = vecop_list_cmtst,
ea580fa3
RH
4621 .vece = MO_32 },
4622 { .fni8 = gen_cmtst_i64,
4623 .fniv = gen_cmtst_vec,
4624 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
53229a77 4625 .opt_opc = vecop_list_cmtst,
ea580fa3
RH
4626 .vece = MO_64 },
4627};
4628
89e68b57
RH
4629static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4630 TCGv_vec a, TCGv_vec b)
4631{
4632 TCGv_vec x = tcg_temp_new_vec_matching(t);
4633 tcg_gen_add_vec(vece, x, a, b);
4634 tcg_gen_usadd_vec(vece, t, a, b);
4635 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4636 tcg_gen_or_vec(vece, sat, sat, x);
4637 tcg_temp_free_vec(x);
4638}
4639
53229a77
RH
4640static const TCGOpcode vecop_list_uqadd[] = {
4641 INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4642};
4643
89e68b57
RH
4644const GVecGen4 uqadd_op[4] = {
4645 { .fniv = gen_uqadd_vec,
4646 .fno = gen_helper_gvec_uqadd_b,
89e68b57 4647 .write_aofs = true,
53229a77 4648 .opt_opc = vecop_list_uqadd,
89e68b57
RH
4649 .vece = MO_8 },
4650 { .fniv = gen_uqadd_vec,
4651 .fno = gen_helper_gvec_uqadd_h,
89e68b57 4652 .write_aofs = true,
53229a77 4653 .opt_opc = vecop_list_uqadd,
89e68b57
RH
4654 .vece = MO_16 },
4655 { .fniv = gen_uqadd_vec,
4656 .fno = gen_helper_gvec_uqadd_s,
89e68b57 4657 .write_aofs = true,
53229a77 4658 .opt_opc = vecop_list_uqadd,
89e68b57
RH
4659 .vece = MO_32 },
4660 { .fniv = gen_uqadd_vec,
4661 .fno = gen_helper_gvec_uqadd_d,
89e68b57 4662 .write_aofs = true,
53229a77 4663 .opt_opc = vecop_list_uqadd,
89e68b57
RH
4664 .vece = MO_64 },
4665};
4666
4667static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4668 TCGv_vec a, TCGv_vec b)
4669{
4670 TCGv_vec x = tcg_temp_new_vec_matching(t);
4671 tcg_gen_add_vec(vece, x, a, b);
4672 tcg_gen_ssadd_vec(vece, t, a, b);
4673 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4674 tcg_gen_or_vec(vece, sat, sat, x);
4675 tcg_temp_free_vec(x);
4676}
4677
53229a77
RH
4678static const TCGOpcode vecop_list_sqadd[] = {
4679 INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4680};
4681
89e68b57
RH
4682const GVecGen4 sqadd_op[4] = {
4683 { .fniv = gen_sqadd_vec,
4684 .fno = gen_helper_gvec_sqadd_b,
53229a77 4685 .opt_opc = vecop_list_sqadd,
89e68b57
RH
4686 .write_aofs = true,
4687 .vece = MO_8 },
4688 { .fniv = gen_sqadd_vec,
4689 .fno = gen_helper_gvec_sqadd_h,
53229a77 4690 .opt_opc = vecop_list_sqadd,
89e68b57
RH
4691 .write_aofs = true,
4692 .vece = MO_16 },
4693 { .fniv = gen_sqadd_vec,
4694 .fno = gen_helper_gvec_sqadd_s,
53229a77 4695 .opt_opc = vecop_list_sqadd,
89e68b57
RH
4696 .write_aofs = true,
4697 .vece = MO_32 },
4698 { .fniv = gen_sqadd_vec,
4699 .fno = gen_helper_gvec_sqadd_d,
53229a77 4700 .opt_opc = vecop_list_sqadd,
89e68b57
RH
4701 .write_aofs = true,
4702 .vece = MO_64 },
4703};
4704
4705static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4706 TCGv_vec a, TCGv_vec b)
4707{
4708 TCGv_vec x = tcg_temp_new_vec_matching(t);
4709 tcg_gen_sub_vec(vece, x, a, b);
4710 tcg_gen_ussub_vec(vece, t, a, b);
4711 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4712 tcg_gen_or_vec(vece, sat, sat, x);
4713 tcg_temp_free_vec(x);
4714}
4715
53229a77
RH
4716static const TCGOpcode vecop_list_uqsub[] = {
4717 INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4718};
4719
89e68b57
RH
4720const GVecGen4 uqsub_op[4] = {
4721 { .fniv = gen_uqsub_vec,
4722 .fno = gen_helper_gvec_uqsub_b,
53229a77 4723 .opt_opc = vecop_list_uqsub,
89e68b57
RH
4724 .write_aofs = true,
4725 .vece = MO_8 },
4726 { .fniv = gen_uqsub_vec,
4727 .fno = gen_helper_gvec_uqsub_h,
53229a77 4728 .opt_opc = vecop_list_uqsub,
89e68b57
RH
4729 .write_aofs = true,
4730 .vece = MO_16 },
4731 { .fniv = gen_uqsub_vec,
4732 .fno = gen_helper_gvec_uqsub_s,
53229a77 4733 .opt_opc = vecop_list_uqsub,
89e68b57
RH
4734 .write_aofs = true,
4735 .vece = MO_32 },
4736 { .fniv = gen_uqsub_vec,
4737 .fno = gen_helper_gvec_uqsub_d,
53229a77 4738 .opt_opc = vecop_list_uqsub,
89e68b57
RH
4739 .write_aofs = true,
4740 .vece = MO_64 },
4741};
4742
4743static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4744 TCGv_vec a, TCGv_vec b)
4745{
4746 TCGv_vec x = tcg_temp_new_vec_matching(t);
4747 tcg_gen_sub_vec(vece, x, a, b);
4748 tcg_gen_sssub_vec(vece, t, a, b);
4749 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4750 tcg_gen_or_vec(vece, sat, sat, x);
4751 tcg_temp_free_vec(x);
4752}
4753
53229a77
RH
4754static const TCGOpcode vecop_list_sqsub[] = {
4755 INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4756};
4757
89e68b57
RH
4758const GVecGen4 sqsub_op[4] = {
4759 { .fniv = gen_sqsub_vec,
4760 .fno = gen_helper_gvec_sqsub_b,
53229a77 4761 .opt_opc = vecop_list_sqsub,
89e68b57
RH
4762 .write_aofs = true,
4763 .vece = MO_8 },
4764 { .fniv = gen_sqsub_vec,
4765 .fno = gen_helper_gvec_sqsub_h,
53229a77 4766 .opt_opc = vecop_list_sqsub,
89e68b57
RH
4767 .write_aofs = true,
4768 .vece = MO_16 },
4769 { .fniv = gen_sqsub_vec,
4770 .fno = gen_helper_gvec_sqsub_s,
53229a77 4771 .opt_opc = vecop_list_sqsub,
89e68b57
RH
4772 .write_aofs = true,
4773 .vece = MO_32 },
4774 { .fniv = gen_sqsub_vec,
4775 .fno = gen_helper_gvec_sqsub_d,
53229a77 4776 .opt_opc = vecop_list_sqsub,
89e68b57
RH
4777 .write_aofs = true,
4778 .vece = MO_64 },
4779};
4780
9ee6e8bb
PB
4781/* Translate a NEON data processing instruction. Return nonzero if the
4782 instruction is invalid.
ad69471c
PB
4783 We process data in a mixture of 32-bit and 64-bit chunks.
4784 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4785
7dcc1f89 4786static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4787{
4788 int op;
4789 int q;
eabcd6fa 4790 int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
9ee6e8bb
PB
4791 int size;
4792 int shift;
4793 int pass;
4794 int count;
4795 int pairwise;
4796 int u;
eabcd6fa 4797 int vec_size;
f3cd8218 4798 uint32_t imm;
39d5492a 4799 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
1a66ac61 4800 TCGv_ptr ptr1, ptr2, ptr3;
a7812ae4 4801 TCGv_i64 tmp64;
9ee6e8bb 4802
2c7ffc41
PM
4803 /* FIXME: this access check should not take precedence over UNDEF
4804 * for invalid encodings; we will generate incorrect syndrome information
4805 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4806 */
9dbbc748 4807 if (s->fp_excp_el) {
a767fac8 4808 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
4be42f40 4809 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
4810 return 0;
4811 }
4812
5df8bac1 4813 if (!s->vfp_enabled)
9ee6e8bb
PB
4814 return 1;
4815 q = (insn & (1 << 6)) != 0;
4816 u = (insn >> 24) & 1;
4817 VFP_DREG_D(rd, insn);
4818 VFP_DREG_N(rn, insn);
4819 VFP_DREG_M(rm, insn);
4820 size = (insn >> 20) & 3;
eabcd6fa
RH
4821 vec_size = q ? 16 : 8;
4822 rd_ofs = neon_reg_offset(rd, 0);
4823 rn_ofs = neon_reg_offset(rn, 0);
4824 rm_ofs = neon_reg_offset(rm, 0);
4825
9ee6e8bb
PB
4826 if ((insn & (1 << 23)) == 0) {
4827 /* Three register same length. */
4828 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4829 /* Catch invalid op and bad size combinations: UNDEF */
4830 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4831 return 1;
4832 }
25f84f79
PM
4833 /* All insns of this form UNDEF for either this condition or the
4834 * superset of cases "Q==1"; we catch the latter later.
4835 */
4836 if (q && ((rd | rn | rm) & 1)) {
4837 return 1;
4838 }
36a71934
RH
4839 switch (op) {
4840 case NEON_3R_SHA:
4841 /* The SHA-1/SHA-256 3-register instructions require special
4842 * treatment here, as their size field is overloaded as an
4843 * op type selector, and they all consume their input in a
4844 * single pass.
4845 */
f1ecb913
AB
4846 if (!q) {
4847 return 1;
4848 }
4849 if (!u) { /* SHA-1 */
962fcbf2 4850 if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
4851 return 1;
4852 }
1a66ac61
RH
4853 ptr1 = vfp_reg_ptr(true, rd);
4854 ptr2 = vfp_reg_ptr(true, rn);
4855 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913 4856 tmp4 = tcg_const_i32(size);
1a66ac61 4857 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
f1ecb913
AB
4858 tcg_temp_free_i32(tmp4);
4859 } else { /* SHA-256 */
962fcbf2 4860 if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
f1ecb913
AB
4861 return 1;
4862 }
1a66ac61
RH
4863 ptr1 = vfp_reg_ptr(true, rd);
4864 ptr2 = vfp_reg_ptr(true, rn);
4865 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913
AB
4866 switch (size) {
4867 case 0:
1a66ac61 4868 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
f1ecb913
AB
4869 break;
4870 case 1:
1a66ac61 4871 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
f1ecb913
AB
4872 break;
4873 case 2:
1a66ac61 4874 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
f1ecb913
AB
4875 break;
4876 }
4877 }
1a66ac61
RH
4878 tcg_temp_free_ptr(ptr1);
4879 tcg_temp_free_ptr(ptr2);
4880 tcg_temp_free_ptr(ptr3);
f1ecb913 4881 return 0;
36a71934
RH
4882
4883 case NEON_3R_VPADD_VQRDMLAH:
4884 if (!u) {
4885 break; /* VPADD */
4886 }
4887 /* VQRDMLAH */
4888 switch (size) {
4889 case 1:
4890 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
4891 q, rd, rn, rm);
4892 case 2:
4893 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
4894 q, rd, rn, rm);
4895 }
4896 return 1;
4897
4898 case NEON_3R_VFM_VQRDMLSH:
4899 if (!u) {
4900 /* VFM, VFMS */
4901 if (size == 1) {
4902 return 1;
4903 }
4904 break;
4905 }
4906 /* VQRDMLSH */
4907 switch (size) {
4908 case 1:
4909 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
4910 q, rd, rn, rm);
4911 case 2:
4912 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
4913 q, rd, rn, rm);
4914 }
4915 return 1;
eabcd6fa
RH
4916
4917 case NEON_3R_LOGIC: /* Logic ops. */
4918 switch ((u << 2) | size) {
4919 case 0: /* VAND */
4920 tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
4921 vec_size, vec_size);
4922 break;
4923 case 1: /* VBIC */
4924 tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
4925 vec_size, vec_size);
4926 break;
2900847f
RH
4927 case 2: /* VORR */
4928 tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
4929 vec_size, vec_size);
eabcd6fa
RH
4930 break;
4931 case 3: /* VORN */
4932 tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
4933 vec_size, vec_size);
4934 break;
4935 case 4: /* VEOR */
4936 tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
4937 vec_size, vec_size);
4938 break;
4939 case 5: /* VBSL */
3a7a2b4e
RH
4940 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
4941 vec_size, vec_size);
eabcd6fa
RH
4942 break;
4943 case 6: /* VBIT */
3a7a2b4e
RH
4944 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
4945 vec_size, vec_size);
eabcd6fa
RH
4946 break;
4947 case 7: /* VBIF */
3a7a2b4e
RH
4948 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
4949 vec_size, vec_size);
eabcd6fa
RH
4950 break;
4951 }
4952 return 0;
e4717ae0
RH
4953
4954 case NEON_3R_VADD_VSUB:
4955 if (u) {
4956 tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
4957 vec_size, vec_size);
4958 } else {
4959 tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
4960 vec_size, vec_size);
4961 }
4962 return 0;
82083184 4963
89e68b57
RH
4964 case NEON_3R_VQADD:
4965 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
4966 rn_ofs, rm_ofs, vec_size, vec_size,
4967 (u ? uqadd_op : sqadd_op) + size);
2f143d3a 4968 return 0;
89e68b57
RH
4969
4970 case NEON_3R_VQSUB:
4971 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
4972 rn_ofs, rm_ofs, vec_size, vec_size,
4973 (u ? uqsub_op : sqsub_op) + size);
2f143d3a 4974 return 0;
89e68b57 4975
82083184
RH
4976 case NEON_3R_VMUL: /* VMUL */
4977 if (u) {
4978 /* Polynomial case allows only P8 and is handled below. */
4979 if (size != 0) {
4980 return 1;
4981 }
4982 } else {
4983 tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
4984 vec_size, vec_size);
4985 return 0;
4986 }
4987 break;
4a7832b0
RH
4988
4989 case NEON_3R_VML: /* VMLA, VMLS */
4990 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
4991 u ? &mls_op[size] : &mla_op[size]);
4992 return 0;
ea580fa3
RH
4993
4994 case NEON_3R_VTST_VCEQ:
4995 if (u) { /* VCEQ */
4996 tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
4997 vec_size, vec_size);
4998 } else { /* VTST */
4999 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
5000 vec_size, vec_size, &cmtst_op[size]);
5001 }
5002 return 0;
5003
5004 case NEON_3R_VCGT:
5005 tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
5006 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
5007 return 0;
5008
5009 case NEON_3R_VCGE:
5010 tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
5011 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
5012 return 0;
6f278221
RH
5013
5014 case NEON_3R_VMAX:
5015 if (u) {
5016 tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
5017 vec_size, vec_size);
5018 } else {
5019 tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
5020 vec_size, vec_size);
5021 }
5022 return 0;
5023 case NEON_3R_VMIN:
5024 if (u) {
5025 tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
5026 vec_size, vec_size);
5027 } else {
5028 tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
5029 vec_size, vec_size);
5030 }
5031 return 0;
f1ecb913 5032 }
4a7832b0 5033
eabcd6fa 5034 if (size == 3) {
62698be3 5035 /* 64-bit element instructions. */
9ee6e8bb 5036 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5037 neon_load_reg64(cpu_V0, rn + pass);
5038 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5039 switch (op) {
62698be3 5040 case NEON_3R_VSHL:
ad69471c
PB
5041 if (u) {
5042 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5043 } else {
5044 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5045 }
5046 break;
62698be3 5047 case NEON_3R_VQSHL:
ad69471c 5048 if (u) {
02da0b2d
PM
5049 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5050 cpu_V1, cpu_V0);
ad69471c 5051 } else {
02da0b2d
PM
5052 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5053 cpu_V1, cpu_V0);
ad69471c
PB
5054 }
5055 break;
62698be3 5056 case NEON_3R_VRSHL:
ad69471c
PB
5057 if (u) {
5058 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5059 } else {
ad69471c
PB
5060 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5061 }
5062 break;
62698be3 5063 case NEON_3R_VQRSHL:
ad69471c 5064 if (u) {
02da0b2d
PM
5065 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5066 cpu_V1, cpu_V0);
ad69471c 5067 } else {
02da0b2d
PM
5068 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5069 cpu_V1, cpu_V0);
1e8d4eec 5070 }
9ee6e8bb 5071 break;
9ee6e8bb
PB
5072 default:
5073 abort();
2c0262af 5074 }
ad69471c 5075 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5076 }
9ee6e8bb 5077 return 0;
2c0262af 5078 }
25f84f79 5079 pairwise = 0;
9ee6e8bb 5080 switch (op) {
62698be3
PM
5081 case NEON_3R_VSHL:
5082 case NEON_3R_VQSHL:
5083 case NEON_3R_VRSHL:
5084 case NEON_3R_VQRSHL:
9ee6e8bb 5085 {
ad69471c
PB
5086 int rtmp;
5087 /* Shift instruction operands are reversed. */
5088 rtmp = rn;
9ee6e8bb 5089 rn = rm;
ad69471c 5090 rm = rtmp;
9ee6e8bb 5091 }
2c0262af 5092 break;
36a71934 5093 case NEON_3R_VPADD_VQRDMLAH:
62698be3
PM
5094 case NEON_3R_VPMAX:
5095 case NEON_3R_VPMIN:
9ee6e8bb 5096 pairwise = 1;
2c0262af 5097 break;
25f84f79
PM
5098 case NEON_3R_FLOAT_ARITH:
5099 pairwise = (u && size < 2); /* if VPADD (float) */
5100 break;
5101 case NEON_3R_FLOAT_MINMAX:
5102 pairwise = u; /* if VPMIN/VPMAX (float) */
5103 break;
5104 case NEON_3R_FLOAT_CMP:
5105 if (!u && size) {
5106 /* no encoding for U=0 C=1x */
5107 return 1;
5108 }
5109 break;
5110 case NEON_3R_FLOAT_ACMP:
5111 if (!u) {
5112 return 1;
5113 }
5114 break;
505935fc
WN
5115 case NEON_3R_FLOAT_MISC:
5116 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5117 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5118 return 1;
5119 }
2c0262af 5120 break;
36a71934
RH
5121 case NEON_3R_VFM_VQRDMLSH:
5122 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
5123 return 1;
5124 }
5125 break;
9ee6e8bb 5126 default:
2c0262af 5127 break;
9ee6e8bb 5128 }
dd8fbd78 5129
25f84f79
PM
5130 if (pairwise && q) {
5131 /* All the pairwise insns UNDEF if Q is set */
5132 return 1;
5133 }
5134
9ee6e8bb
PB
5135 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5136
5137 if (pairwise) {
5138 /* Pairwise. */
a5a14945
JR
5139 if (pass < 1) {
5140 tmp = neon_load_reg(rn, 0);
5141 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5142 } else {
a5a14945
JR
5143 tmp = neon_load_reg(rm, 0);
5144 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5145 }
5146 } else {
5147 /* Elementwise. */
dd8fbd78
FN
5148 tmp = neon_load_reg(rn, pass);
5149 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5150 }
5151 switch (op) {
62698be3 5152 case NEON_3R_VHADD:
9ee6e8bb
PB
5153 GEN_NEON_INTEGER_OP(hadd);
5154 break;
62698be3 5155 case NEON_3R_VRHADD:
9ee6e8bb 5156 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5157 break;
62698be3 5158 case NEON_3R_VHSUB:
9ee6e8bb
PB
5159 GEN_NEON_INTEGER_OP(hsub);
5160 break;
62698be3 5161 case NEON_3R_VSHL:
ad69471c 5162 GEN_NEON_INTEGER_OP(shl);
2c0262af 5163 break;
62698be3 5164 case NEON_3R_VQSHL:
02da0b2d 5165 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5166 break;
62698be3 5167 case NEON_3R_VRSHL:
ad69471c 5168 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5169 break;
62698be3 5170 case NEON_3R_VQRSHL:
02da0b2d 5171 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5172 break;
62698be3 5173 case NEON_3R_VABD:
9ee6e8bb
PB
5174 GEN_NEON_INTEGER_OP(abd);
5175 break;
62698be3 5176 case NEON_3R_VABA:
9ee6e8bb 5177 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5178 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5179 tmp2 = neon_load_reg(rd, pass);
5180 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5181 break;
62698be3 5182 case NEON_3R_VMUL:
82083184
RH
5183 /* VMUL.P8; other cases already eliminated. */
5184 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb 5185 break;
62698be3 5186 case NEON_3R_VPMAX:
9ee6e8bb
PB
5187 GEN_NEON_INTEGER_OP(pmax);
5188 break;
62698be3 5189 case NEON_3R_VPMIN:
9ee6e8bb
PB
5190 GEN_NEON_INTEGER_OP(pmin);
5191 break;
62698be3 5192 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5193 if (!u) { /* VQDMULH */
5194 switch (size) {
02da0b2d
PM
5195 case 1:
5196 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5197 break;
5198 case 2:
5199 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5200 break;
62698be3 5201 default: abort();
9ee6e8bb 5202 }
62698be3 5203 } else { /* VQRDMULH */
9ee6e8bb 5204 switch (size) {
02da0b2d
PM
5205 case 1:
5206 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5207 break;
5208 case 2:
5209 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5210 break;
62698be3 5211 default: abort();
9ee6e8bb
PB
5212 }
5213 }
5214 break;
36a71934 5215 case NEON_3R_VPADD_VQRDMLAH:
9ee6e8bb 5216 switch (size) {
dd8fbd78
FN
5217 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5218 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5219 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5220 default: abort();
9ee6e8bb
PB
5221 }
5222 break;
62698be3 5223 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5224 {
5225 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5226 switch ((u << 2) | size) {
5227 case 0: /* VADD */
aa47cfdd
PM
5228 case 4: /* VPADD */
5229 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5230 break;
5231 case 2: /* VSUB */
aa47cfdd 5232 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5233 break;
5234 case 6: /* VABD */
aa47cfdd 5235 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5236 break;
5237 default:
62698be3 5238 abort();
9ee6e8bb 5239 }
aa47cfdd 5240 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5241 break;
aa47cfdd 5242 }
62698be3 5243 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5244 {
5245 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5246 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5247 if (!u) {
7d1b0095 5248 tcg_temp_free_i32(tmp2);
dd8fbd78 5249 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5250 if (size == 0) {
aa47cfdd 5251 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5252 } else {
aa47cfdd 5253 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5254 }
5255 }
aa47cfdd 5256 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5257 break;
aa47cfdd 5258 }
62698be3 5259 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5260 {
5261 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5262 if (!u) {
aa47cfdd 5263 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5264 } else {
aa47cfdd
PM
5265 if (size == 0) {
5266 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5267 } else {
5268 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5269 }
b5ff1b31 5270 }
aa47cfdd 5271 tcg_temp_free_ptr(fpstatus);
2c0262af 5272 break;
aa47cfdd 5273 }
62698be3 5274 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5275 {
5276 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5277 if (size == 0) {
5278 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5279 } else {
5280 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5281 }
5282 tcg_temp_free_ptr(fpstatus);
2c0262af 5283 break;
aa47cfdd 5284 }
62698be3 5285 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5286 {
5287 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5288 if (size == 0) {
f71a2ae5 5289 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5290 } else {
f71a2ae5 5291 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5292 }
5293 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5294 break;
aa47cfdd 5295 }
505935fc
WN
5296 case NEON_3R_FLOAT_MISC:
5297 if (u) {
5298 /* VMAXNM/VMINNM */
5299 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5300 if (size == 0) {
f71a2ae5 5301 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5302 } else {
f71a2ae5 5303 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5304 }
5305 tcg_temp_free_ptr(fpstatus);
5306 } else {
5307 if (size == 0) {
5308 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5309 } else {
5310 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5311 }
5312 }
2c0262af 5313 break;
36a71934 5314 case NEON_3R_VFM_VQRDMLSH:
da97f52c
PM
5315 {
5316 /* VFMA, VFMS: fused multiply-add */
5317 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5318 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5319 if (size) {
5320 /* VFMS */
5321 gen_helper_vfp_negs(tmp, tmp);
5322 }
5323 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5324 tcg_temp_free_i32(tmp3);
5325 tcg_temp_free_ptr(fpstatus);
5326 break;
5327 }
9ee6e8bb
PB
5328 default:
5329 abort();
2c0262af 5330 }
7d1b0095 5331 tcg_temp_free_i32(tmp2);
dd8fbd78 5332
9ee6e8bb
PB
5333 /* Save the result. For elementwise operations we can put it
5334 straight into the destination register. For pairwise operations
5335 we have to be careful to avoid clobbering the source operands. */
5336 if (pairwise && rd == rm) {
dd8fbd78 5337 neon_store_scratch(pass, tmp);
9ee6e8bb 5338 } else {
dd8fbd78 5339 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5340 }
5341
5342 } /* for pass */
5343 if (pairwise && rd == rm) {
5344 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5345 tmp = neon_load_scratch(pass);
5346 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5347 }
5348 }
ad69471c 5349 /* End of 3 register same size operations. */
9ee6e8bb
PB
5350 } else if (insn & (1 << 4)) {
5351 if ((insn & 0x00380080) != 0) {
5352 /* Two registers and shift. */
5353 op = (insn >> 8) & 0xf;
5354 if (insn & (1 << 7)) {
cc13115b
PM
5355 /* 64-bit shift. */
5356 if (op > 7) {
5357 return 1;
5358 }
9ee6e8bb
PB
5359 size = 3;
5360 } else {
5361 size = 2;
5362 while ((insn & (1 << (size + 19))) == 0)
5363 size--;
5364 }
5365 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
9ee6e8bb
PB
5366 if (op < 8) {
5367 /* Shift by immediate:
5368 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5369 if (q && ((rd | rm) & 1)) {
5370 return 1;
5371 }
5372 if (!u && (op == 4 || op == 6)) {
5373 return 1;
5374 }
9ee6e8bb
PB
5375 /* Right shifts are encoded as N - shift, where N is the
5376 element size in bits. */
1dc8425e 5377 if (op <= 4) {
9ee6e8bb 5378 shift = shift - (1 << (size + 3));
1dc8425e
RH
5379 }
5380
5381 switch (op) {
5382 case 0: /* VSHR */
5383 /* Right shift comes here negative. */
5384 shift = -shift;
5385 /* Shifts larger than the element size are architecturally
5386 * valid. Unsigned results in all zeros; signed results
5387 * in all sign bits.
5388 */
5389 if (!u) {
5390 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
5391 MIN(shift, (8 << size) - 1),
5392 vec_size, vec_size);
5393 } else if (shift >= 8 << size) {
5394 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5395 } else {
5396 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
5397 vec_size, vec_size);
5398 }
5399 return 0;
5400
41f6c113
RH
5401 case 1: /* VSRA */
5402 /* Right shift comes here negative. */
5403 shift = -shift;
5404 /* Shifts larger than the element size are architecturally
5405 * valid. Unsigned results in all zeros; signed results
5406 * in all sign bits.
5407 */
5408 if (!u) {
5409 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5410 MIN(shift, (8 << size) - 1),
5411 &ssra_op[size]);
5412 } else if (shift >= 8 << size) {
5413 /* rd += 0 */
5414 } else {
5415 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5416 shift, &usra_op[size]);
5417 }
5418 return 0;
5419
f3cd8218
RH
5420 case 4: /* VSRI */
5421 if (!u) {
5422 return 1;
5423 }
5424 /* Right shift comes here negative. */
5425 shift = -shift;
5426 /* Shift out of range leaves destination unchanged. */
5427 if (shift < 8 << size) {
5428 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5429 shift, &sri_op[size]);
5430 }
5431 return 0;
5432
1dc8425e 5433 case 5: /* VSHL, VSLI */
f3cd8218
RH
5434 if (u) { /* VSLI */
5435 /* Shift out of range leaves destination unchanged. */
5436 if (shift < 8 << size) {
5437 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
5438 vec_size, shift, &sli_op[size]);
5439 }
5440 } else { /* VSHL */
1dc8425e
RH
5441 /* Shifts larger than the element size are
5442 * architecturally valid and results in zero.
5443 */
5444 if (shift >= 8 << size) {
5445 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5446 } else {
5447 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
5448 vec_size, vec_size);
5449 }
1dc8425e 5450 }
f3cd8218 5451 return 0;
1dc8425e
RH
5452 }
5453
9ee6e8bb
PB
5454 if (size == 3) {
5455 count = q + 1;
5456 } else {
5457 count = q ? 4: 2;
5458 }
1dc8425e
RH
5459
5460 /* To avoid excessive duplication of ops we implement shift
5461 * by immediate using the variable shift operations.
5462 */
5463 imm = dup_const(size, shift);
9ee6e8bb
PB
5464
5465 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5466 if (size == 3) {
5467 neon_load_reg64(cpu_V0, rm + pass);
5468 tcg_gen_movi_i64(cpu_V1, imm);
5469 switch (op) {
ad69471c
PB
5470 case 2: /* VRSHR */
5471 case 3: /* VRSRA */
5472 if (u)
5473 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5474 else
ad69471c 5475 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5476 break;
0322b26e 5477 case 6: /* VQSHLU */
02da0b2d
PM
5478 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5479 cpu_V0, cpu_V1);
ad69471c 5480 break;
0322b26e
PM
5481 case 7: /* VQSHL */
5482 if (u) {
02da0b2d 5483 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5484 cpu_V0, cpu_V1);
5485 } else {
02da0b2d 5486 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5487 cpu_V0, cpu_V1);
5488 }
9ee6e8bb 5489 break;
1dc8425e
RH
5490 default:
5491 g_assert_not_reached();
9ee6e8bb 5492 }
41f6c113 5493 if (op == 3) {
ad69471c 5494 /* Accumulate. */
5371cb81 5495 neon_load_reg64(cpu_V1, rd + pass);
ad69471c 5496 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5497 }
5498 neon_store_reg64(cpu_V0, rd + pass);
5499 } else { /* size < 3 */
5500 /* Operands in T0 and T1. */
dd8fbd78 5501 tmp = neon_load_reg(rm, pass);
7d1b0095 5502 tmp2 = tcg_temp_new_i32();
dd8fbd78 5503 tcg_gen_movi_i32(tmp2, imm);
ad69471c 5504 switch (op) {
ad69471c
PB
5505 case 2: /* VRSHR */
5506 case 3: /* VRSRA */
5507 GEN_NEON_INTEGER_OP(rshl);
5508 break;
0322b26e 5509 case 6: /* VQSHLU */
ad69471c 5510 switch (size) {
0322b26e 5511 case 0:
02da0b2d
PM
5512 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5513 tmp, tmp2);
0322b26e
PM
5514 break;
5515 case 1:
02da0b2d
PM
5516 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5517 tmp, tmp2);
0322b26e
PM
5518 break;
5519 case 2:
02da0b2d
PM
5520 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5521 tmp, tmp2);
0322b26e
PM
5522 break;
5523 default:
cc13115b 5524 abort();
ad69471c
PB
5525 }
5526 break;
0322b26e 5527 case 7: /* VQSHL */
02da0b2d 5528 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5529 break;
1dc8425e
RH
5530 default:
5531 g_assert_not_reached();
ad69471c 5532 }
7d1b0095 5533 tcg_temp_free_i32(tmp2);
ad69471c 5534
41f6c113 5535 if (op == 3) {
ad69471c 5536 /* Accumulate. */
dd8fbd78 5537 tmp2 = neon_load_reg(rd, pass);
5371cb81 5538 gen_neon_add(size, tmp, tmp2);
7d1b0095 5539 tcg_temp_free_i32(tmp2);
ad69471c 5540 }
dd8fbd78 5541 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5542 }
5543 } /* for pass */
5544 } else if (op < 10) {
ad69471c 5545 /* Shift by immediate and narrow:
9ee6e8bb 5546 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5547 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5548 if (rm & 1) {
5549 return 1;
5550 }
9ee6e8bb
PB
5551 shift = shift - (1 << (size + 3));
5552 size++;
92cdfaeb 5553 if (size == 3) {
a7812ae4 5554 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5555 neon_load_reg64(cpu_V0, rm);
5556 neon_load_reg64(cpu_V1, rm + 1);
5557 for (pass = 0; pass < 2; pass++) {
5558 TCGv_i64 in;
5559 if (pass == 0) {
5560 in = cpu_V0;
5561 } else {
5562 in = cpu_V1;
5563 }
ad69471c 5564 if (q) {
0b36f4cd 5565 if (input_unsigned) {
92cdfaeb 5566 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5567 } else {
92cdfaeb 5568 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5569 }
ad69471c 5570 } else {
0b36f4cd 5571 if (input_unsigned) {
92cdfaeb 5572 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5573 } else {
92cdfaeb 5574 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5575 }
ad69471c 5576 }
7d1b0095 5577 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5578 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5579 neon_store_reg(rd, pass, tmp);
5580 } /* for pass */
5581 tcg_temp_free_i64(tmp64);
5582 } else {
5583 if (size == 1) {
5584 imm = (uint16_t)shift;
5585 imm |= imm << 16;
2c0262af 5586 } else {
92cdfaeb
PM
5587 /* size == 2 */
5588 imm = (uint32_t)shift;
5589 }
5590 tmp2 = tcg_const_i32(imm);
5591 tmp4 = neon_load_reg(rm + 1, 0);
5592 tmp5 = neon_load_reg(rm + 1, 1);
5593 for (pass = 0; pass < 2; pass++) {
5594 if (pass == 0) {
5595 tmp = neon_load_reg(rm, 0);
5596 } else {
5597 tmp = tmp4;
5598 }
0b36f4cd
CL
5599 gen_neon_shift_narrow(size, tmp, tmp2, q,
5600 input_unsigned);
92cdfaeb
PM
5601 if (pass == 0) {
5602 tmp3 = neon_load_reg(rm, 1);
5603 } else {
5604 tmp3 = tmp5;
5605 }
0b36f4cd
CL
5606 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5607 input_unsigned);
36aa55dc 5608 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5609 tcg_temp_free_i32(tmp);
5610 tcg_temp_free_i32(tmp3);
5611 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5612 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5613 neon_store_reg(rd, pass, tmp);
5614 } /* for pass */
c6067f04 5615 tcg_temp_free_i32(tmp2);
b75263d6 5616 }
9ee6e8bb 5617 } else if (op == 10) {
cc13115b
PM
5618 /* VSHLL, VMOVL */
5619 if (q || (rd & 1)) {
9ee6e8bb 5620 return 1;
cc13115b 5621 }
ad69471c
PB
5622 tmp = neon_load_reg(rm, 0);
5623 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5624 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5625 if (pass == 1)
5626 tmp = tmp2;
5627
5628 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5629
9ee6e8bb
PB
5630 if (shift != 0) {
5631 /* The shift is less than the width of the source
ad69471c
PB
5632 type, so we can just shift the whole register. */
5633 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5634 /* Widen the result of shift: we need to clear
5635 * the potential overflow bits resulting from
5636 * left bits of the narrow input appearing as
5637 * right bits of left the neighbour narrow
5638 * input. */
ad69471c
PB
5639 if (size < 2 || !u) {
5640 uint64_t imm64;
5641 if (size == 0) {
5642 imm = (0xffu >> (8 - shift));
5643 imm |= imm << 16;
acdf01ef 5644 } else if (size == 1) {
ad69471c 5645 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5646 } else {
5647 /* size == 2 */
5648 imm = 0xffffffff >> (32 - shift);
5649 }
5650 if (size < 2) {
5651 imm64 = imm | (((uint64_t)imm) << 32);
5652 } else {
5653 imm64 = imm;
9ee6e8bb 5654 }
acdf01ef 5655 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5656 }
5657 }
ad69471c 5658 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5659 }
f73534a5 5660 } else if (op >= 14) {
9ee6e8bb 5661 /* VCVT fixed-point. */
c253dd78
PM
5662 TCGv_ptr fpst;
5663 TCGv_i32 shiftv;
5664 VFPGenFixPointFn *fn;
5665
cc13115b
PM
5666 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5667 return 1;
5668 }
c253dd78
PM
5669
5670 if (!(op & 1)) {
5671 if (u) {
5672 fn = gen_helper_vfp_ultos;
5673 } else {
5674 fn = gen_helper_vfp_sltos;
5675 }
5676 } else {
5677 if (u) {
5678 fn = gen_helper_vfp_touls_round_to_zero;
5679 } else {
5680 fn = gen_helper_vfp_tosls_round_to_zero;
5681 }
5682 }
5683
f73534a5
PM
5684 /* We have already masked out the must-be-1 top bit of imm6,
5685 * hence this 32-shift where the ARM ARM has 64-imm6.
5686 */
5687 shift = 32 - shift;
c253dd78
PM
5688 fpst = get_fpstatus_ptr(1);
5689 shiftv = tcg_const_i32(shift);
9ee6e8bb 5690 for (pass = 0; pass < (q ? 4 : 2); pass++) {
c253dd78
PM
5691 TCGv_i32 tmpf = neon_load_reg(rm, pass);
5692 fn(tmpf, tmpf, shiftv, fpst);
5693 neon_store_reg(rd, pass, tmpf);
2c0262af 5694 }
c253dd78
PM
5695 tcg_temp_free_ptr(fpst);
5696 tcg_temp_free_i32(shiftv);
2c0262af 5697 } else {
9ee6e8bb
PB
5698 return 1;
5699 }
5700 } else { /* (insn & 0x00380080) == 0 */
246fa4ac
RH
5701 int invert, reg_ofs, vec_size;
5702
7d80fee5
PM
5703 if (q && (rd & 1)) {
5704 return 1;
5705 }
9ee6e8bb
PB
5706
5707 op = (insn >> 8) & 0xf;
5708 /* One register and immediate. */
5709 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5710 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5711 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5712 * We choose to not special-case this and will behave as if a
5713 * valid constant encoding of 0 had been given.
5714 */
9ee6e8bb
PB
5715 switch (op) {
5716 case 0: case 1:
5717 /* no-op */
5718 break;
5719 case 2: case 3:
5720 imm <<= 8;
5721 break;
5722 case 4: case 5:
5723 imm <<= 16;
5724 break;
5725 case 6: case 7:
5726 imm <<= 24;
5727 break;
5728 case 8: case 9:
5729 imm |= imm << 16;
5730 break;
5731 case 10: case 11:
5732 imm = (imm << 8) | (imm << 24);
5733 break;
5734 case 12:
8e31209e 5735 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5736 break;
5737 case 13:
5738 imm = (imm << 16) | 0xffff;
5739 break;
5740 case 14:
5741 imm |= (imm << 8) | (imm << 16) | (imm << 24);
246fa4ac 5742 if (invert) {
9ee6e8bb 5743 imm = ~imm;
246fa4ac 5744 }
9ee6e8bb
PB
5745 break;
5746 case 15:
7d80fee5
PM
5747 if (invert) {
5748 return 1;
5749 }
9ee6e8bb
PB
5750 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5751 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5752 break;
5753 }
246fa4ac 5754 if (invert) {
9ee6e8bb 5755 imm = ~imm;
246fa4ac 5756 }
9ee6e8bb 5757
246fa4ac
RH
5758 reg_ofs = neon_reg_offset(rd, 0);
5759 vec_size = q ? 16 : 8;
5760
5761 if (op & 1 && op < 12) {
5762 if (invert) {
5763 /* The immediate value has already been inverted,
5764 * so BIC becomes AND.
5765 */
5766 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
5767 vec_size, vec_size);
9ee6e8bb 5768 } else {
246fa4ac
RH
5769 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
5770 vec_size, vec_size);
5771 }
5772 } else {
5773 /* VMOV, VMVN. */
5774 if (op == 14 && invert) {
5775 TCGv_i64 t64 = tcg_temp_new_i64();
5776
5777 for (pass = 0; pass <= q; ++pass) {
5778 uint64_t val = 0;
a5a14945 5779 int n;
246fa4ac
RH
5780
5781 for (n = 0; n < 8; n++) {
5782 if (imm & (1 << (n + pass * 8))) {
5783 val |= 0xffull << (n * 8);
5784 }
9ee6e8bb 5785 }
246fa4ac
RH
5786 tcg_gen_movi_i64(t64, val);
5787 neon_store_reg64(t64, rd + pass);
9ee6e8bb 5788 }
246fa4ac
RH
5789 tcg_temp_free_i64(t64);
5790 } else {
5791 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
9ee6e8bb
PB
5792 }
5793 }
5794 }
e4b3861d 5795 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5796 if (size != 3) {
5797 op = (insn >> 8) & 0xf;
5798 if ((insn & (1 << 6)) == 0) {
5799 /* Three registers of different lengths. */
5800 int src1_wide;
5801 int src2_wide;
5802 int prewiden;
526d0096
PM
5803 /* undefreq: bit 0 : UNDEF if size == 0
5804 * bit 1 : UNDEF if size == 1
5805 * bit 2 : UNDEF if size == 2
5806 * bit 3 : UNDEF if U == 1
5807 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
5808 */
5809 int undefreq;
5810 /* prewiden, src1_wide, src2_wide, undefreq */
5811 static const int neon_3reg_wide[16][4] = {
5812 {1, 0, 0, 0}, /* VADDL */
5813 {1, 1, 0, 0}, /* VADDW */
5814 {1, 0, 0, 0}, /* VSUBL */
5815 {1, 1, 0, 0}, /* VSUBW */
5816 {0, 1, 1, 0}, /* VADDHN */
5817 {0, 0, 0, 0}, /* VABAL */
5818 {0, 1, 1, 0}, /* VSUBHN */
5819 {0, 0, 0, 0}, /* VABDL */
5820 {0, 0, 0, 0}, /* VMLAL */
526d0096 5821 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 5822 {0, 0, 0, 0}, /* VMLSL */
526d0096 5823 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 5824 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 5825 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 5826 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 5827 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5828 };
5829
5830 prewiden = neon_3reg_wide[op][0];
5831 src1_wide = neon_3reg_wide[op][1];
5832 src2_wide = neon_3reg_wide[op][2];
695272dc 5833 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5834
526d0096
PM
5835 if ((undefreq & (1 << size)) ||
5836 ((undefreq & 8) && u)) {
695272dc
PM
5837 return 1;
5838 }
5839 if ((src1_wide && (rn & 1)) ||
5840 (src2_wide && (rm & 1)) ||
5841 (!src2_wide && (rd & 1))) {
ad69471c 5842 return 1;
695272dc 5843 }
ad69471c 5844
4e624eda
PM
5845 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
5846 * outside the loop below as it only performs a single pass.
5847 */
5848 if (op == 14 && size == 2) {
5849 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
5850
962fcbf2 5851 if (!dc_isar_feature(aa32_pmull, s)) {
4e624eda
PM
5852 return 1;
5853 }
5854 tcg_rn = tcg_temp_new_i64();
5855 tcg_rm = tcg_temp_new_i64();
5856 tcg_rd = tcg_temp_new_i64();
5857 neon_load_reg64(tcg_rn, rn);
5858 neon_load_reg64(tcg_rm, rm);
5859 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
5860 neon_store_reg64(tcg_rd, rd);
5861 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
5862 neon_store_reg64(tcg_rd, rd + 1);
5863 tcg_temp_free_i64(tcg_rn);
5864 tcg_temp_free_i64(tcg_rm);
5865 tcg_temp_free_i64(tcg_rd);
5866 return 0;
5867 }
5868
9ee6e8bb
PB
5869 /* Avoid overlapping operands. Wide source operands are
5870 always aligned so will never overlap with wide
5871 destinations in problematic ways. */
8f8e3aa4 5872 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5873 tmp = neon_load_reg(rm, 1);
5874 neon_store_scratch(2, tmp);
8f8e3aa4 5875 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5876 tmp = neon_load_reg(rn, 1);
5877 neon_store_scratch(2, tmp);
9ee6e8bb 5878 }
f764718d 5879 tmp3 = NULL;
9ee6e8bb 5880 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5881 if (src1_wide) {
5882 neon_load_reg64(cpu_V0, rn + pass);
f764718d 5883 tmp = NULL;
9ee6e8bb 5884 } else {
ad69471c 5885 if (pass == 1 && rd == rn) {
dd8fbd78 5886 tmp = neon_load_scratch(2);
9ee6e8bb 5887 } else {
ad69471c
PB
5888 tmp = neon_load_reg(rn, pass);
5889 }
5890 if (prewiden) {
5891 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5892 }
5893 }
ad69471c
PB
5894 if (src2_wide) {
5895 neon_load_reg64(cpu_V1, rm + pass);
f764718d 5896 tmp2 = NULL;
9ee6e8bb 5897 } else {
ad69471c 5898 if (pass == 1 && rd == rm) {
dd8fbd78 5899 tmp2 = neon_load_scratch(2);
9ee6e8bb 5900 } else {
ad69471c
PB
5901 tmp2 = neon_load_reg(rm, pass);
5902 }
5903 if (prewiden) {
5904 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5905 }
9ee6e8bb
PB
5906 }
5907 switch (op) {
5908 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5909 gen_neon_addl(size);
9ee6e8bb 5910 break;
79b0e534 5911 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5912 gen_neon_subl(size);
9ee6e8bb
PB
5913 break;
5914 case 5: case 7: /* VABAL, VABDL */
5915 switch ((size << 1) | u) {
ad69471c
PB
5916 case 0:
5917 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5918 break;
5919 case 1:
5920 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5921 break;
5922 case 2:
5923 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5924 break;
5925 case 3:
5926 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5927 break;
5928 case 4:
5929 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5930 break;
5931 case 5:
5932 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5933 break;
9ee6e8bb
PB
5934 default: abort();
5935 }
7d1b0095
PM
5936 tcg_temp_free_i32(tmp2);
5937 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5938 break;
5939 case 8: case 9: case 10: case 11: case 12: case 13:
5940 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5941 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5942 break;
5943 case 14: /* Polynomial VMULL */
e5ca24cb 5944 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5945 tcg_temp_free_i32(tmp2);
5946 tcg_temp_free_i32(tmp);
e5ca24cb 5947 break;
695272dc
PM
5948 default: /* 15 is RESERVED: caught earlier */
5949 abort();
9ee6e8bb 5950 }
ebcd88ce
PM
5951 if (op == 13) {
5952 /* VQDMULL */
5953 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5954 neon_store_reg64(cpu_V0, rd + pass);
5955 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5956 /* Accumulate. */
ebcd88ce 5957 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5958 switch (op) {
4dc064e6
PM
5959 case 10: /* VMLSL */
5960 gen_neon_negl(cpu_V0, size);
5961 /* Fall through */
5962 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5963 gen_neon_addl(size);
9ee6e8bb
PB
5964 break;
5965 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5966 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5967 if (op == 11) {
5968 gen_neon_negl(cpu_V0, size);
5969 }
ad69471c
PB
5970 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5971 break;
9ee6e8bb
PB
5972 default:
5973 abort();
5974 }
ad69471c 5975 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5976 } else if (op == 4 || op == 6) {
5977 /* Narrowing operation. */
7d1b0095 5978 tmp = tcg_temp_new_i32();
79b0e534 5979 if (!u) {
9ee6e8bb 5980 switch (size) {
ad69471c
PB
5981 case 0:
5982 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5983 break;
5984 case 1:
5985 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5986 break;
5987 case 2:
664b7e3b 5988 tcg_gen_extrh_i64_i32(tmp, cpu_V0);
ad69471c 5989 break;
9ee6e8bb
PB
5990 default: abort();
5991 }
5992 } else {
5993 switch (size) {
ad69471c
PB
5994 case 0:
5995 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5996 break;
5997 case 1:
5998 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5999 break;
6000 case 2:
6001 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
664b7e3b 6002 tcg_gen_extrh_i64_i32(tmp, cpu_V0);
ad69471c 6003 break;
9ee6e8bb
PB
6004 default: abort();
6005 }
6006 }
ad69471c
PB
6007 if (pass == 0) {
6008 tmp3 = tmp;
6009 } else {
6010 neon_store_reg(rd, 0, tmp3);
6011 neon_store_reg(rd, 1, tmp);
6012 }
9ee6e8bb
PB
6013 } else {
6014 /* Write back the result. */
ad69471c 6015 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6016 }
6017 }
6018 } else {
3e3326df
PM
6019 /* Two registers and a scalar. NB that for ops of this form
6020 * the ARM ARM labels bit 24 as Q, but it is in our variable
6021 * 'u', not 'q'.
6022 */
6023 if (size == 0) {
6024 return 1;
6025 }
9ee6e8bb 6026 switch (op) {
9ee6e8bb 6027 case 1: /* Float VMLA scalar */
9ee6e8bb 6028 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6029 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6030 if (size == 1) {
6031 return 1;
6032 }
6033 /* fall through */
6034 case 0: /* Integer VMLA scalar */
6035 case 4: /* Integer VMLS scalar */
6036 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6037 case 12: /* VQDMULH scalar */
6038 case 13: /* VQRDMULH scalar */
3e3326df
PM
6039 if (u && ((rd | rn) & 1)) {
6040 return 1;
6041 }
dd8fbd78
FN
6042 tmp = neon_get_scalar(size, rm);
6043 neon_store_scratch(0, tmp);
9ee6e8bb 6044 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6045 tmp = neon_load_scratch(0);
6046 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6047 if (op == 12) {
6048 if (size == 1) {
02da0b2d 6049 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6050 } else {
02da0b2d 6051 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6052 }
6053 } else if (op == 13) {
6054 if (size == 1) {
02da0b2d 6055 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6056 } else {
02da0b2d 6057 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6058 }
6059 } else if (op & 1) {
aa47cfdd
PM
6060 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6061 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6062 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6063 } else {
6064 switch (size) {
dd8fbd78
FN
6065 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6066 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6067 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6068 default: abort();
9ee6e8bb
PB
6069 }
6070 }
7d1b0095 6071 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6072 if (op < 8) {
6073 /* Accumulate. */
dd8fbd78 6074 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6075 switch (op) {
6076 case 0:
dd8fbd78 6077 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6078 break;
6079 case 1:
aa47cfdd
PM
6080 {
6081 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6082 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6083 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6084 break;
aa47cfdd 6085 }
9ee6e8bb 6086 case 4:
dd8fbd78 6087 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6088 break;
6089 case 5:
aa47cfdd
PM
6090 {
6091 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6092 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6093 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6094 break;
aa47cfdd 6095 }
9ee6e8bb
PB
6096 default:
6097 abort();
6098 }
7d1b0095 6099 tcg_temp_free_i32(tmp2);
9ee6e8bb 6100 }
dd8fbd78 6101 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6102 }
6103 break;
9ee6e8bb 6104 case 3: /* VQDMLAL scalar */
9ee6e8bb 6105 case 7: /* VQDMLSL scalar */
9ee6e8bb 6106 case 11: /* VQDMULL scalar */
3e3326df 6107 if (u == 1) {
ad69471c 6108 return 1;
3e3326df
PM
6109 }
6110 /* fall through */
6111 case 2: /* VMLAL sclar */
6112 case 6: /* VMLSL scalar */
6113 case 10: /* VMULL scalar */
6114 if (rd & 1) {
6115 return 1;
6116 }
dd8fbd78 6117 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6118 /* We need a copy of tmp2 because gen_neon_mull
6119 * deletes it during pass 0. */
7d1b0095 6120 tmp4 = tcg_temp_new_i32();
c6067f04 6121 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6122 tmp3 = neon_load_reg(rn, 1);
ad69471c 6123
9ee6e8bb 6124 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6125 if (pass == 0) {
6126 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6127 } else {
dd8fbd78 6128 tmp = tmp3;
c6067f04 6129 tmp2 = tmp4;
9ee6e8bb 6130 }
ad69471c 6131 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6132 if (op != 11) {
6133 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6134 }
9ee6e8bb 6135 switch (op) {
4dc064e6
PM
6136 case 6:
6137 gen_neon_negl(cpu_V0, size);
6138 /* Fall through */
6139 case 2:
ad69471c 6140 gen_neon_addl(size);
9ee6e8bb
PB
6141 break;
6142 case 3: case 7:
ad69471c 6143 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6144 if (op == 7) {
6145 gen_neon_negl(cpu_V0, size);
6146 }
ad69471c 6147 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6148 break;
6149 case 10:
6150 /* no-op */
6151 break;
6152 case 11:
ad69471c 6153 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6154 break;
6155 default:
6156 abort();
6157 }
ad69471c 6158 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6159 }
61adacc8
RH
6160 break;
6161 case 14: /* VQRDMLAH scalar */
6162 case 15: /* VQRDMLSH scalar */
6163 {
6164 NeonGenThreeOpEnvFn *fn;
dd8fbd78 6165
962fcbf2 6166 if (!dc_isar_feature(aa32_rdm, s)) {
61adacc8
RH
6167 return 1;
6168 }
6169 if (u && ((rd | rn) & 1)) {
6170 return 1;
6171 }
6172 if (op == 14) {
6173 if (size == 1) {
6174 fn = gen_helper_neon_qrdmlah_s16;
6175 } else {
6176 fn = gen_helper_neon_qrdmlah_s32;
6177 }
6178 } else {
6179 if (size == 1) {
6180 fn = gen_helper_neon_qrdmlsh_s16;
6181 } else {
6182 fn = gen_helper_neon_qrdmlsh_s32;
6183 }
6184 }
dd8fbd78 6185
61adacc8
RH
6186 tmp2 = neon_get_scalar(size, rm);
6187 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6188 tmp = neon_load_reg(rn, pass);
6189 tmp3 = neon_load_reg(rd, pass);
6190 fn(tmp, cpu_env, tmp, tmp2, tmp3);
6191 tcg_temp_free_i32(tmp3);
6192 neon_store_reg(rd, pass, tmp);
6193 }
6194 tcg_temp_free_i32(tmp2);
6195 }
9ee6e8bb 6196 break;
61adacc8
RH
6197 default:
6198 g_assert_not_reached();
9ee6e8bb
PB
6199 }
6200 }
6201 } else { /* size == 3 */
6202 if (!u) {
6203 /* Extract. */
9ee6e8bb 6204 imm = (insn >> 8) & 0xf;
ad69471c
PB
6205
6206 if (imm > 7 && !q)
6207 return 1;
6208
52579ea1
PM
6209 if (q && ((rd | rn | rm) & 1)) {
6210 return 1;
6211 }
6212
ad69471c
PB
6213 if (imm == 0) {
6214 neon_load_reg64(cpu_V0, rn);
6215 if (q) {
6216 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6217 }
ad69471c
PB
6218 } else if (imm == 8) {
6219 neon_load_reg64(cpu_V0, rn + 1);
6220 if (q) {
6221 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6222 }
ad69471c 6223 } else if (q) {
a7812ae4 6224 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6225 if (imm < 8) {
6226 neon_load_reg64(cpu_V0, rn);
a7812ae4 6227 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6228 } else {
6229 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6230 neon_load_reg64(tmp64, rm);
ad69471c
PB
6231 }
6232 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6233 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6234 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6235 if (imm < 8) {
6236 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6237 } else {
ad69471c
PB
6238 neon_load_reg64(cpu_V1, rm + 1);
6239 imm -= 8;
9ee6e8bb 6240 }
ad69471c 6241 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6242 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6243 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6244 tcg_temp_free_i64(tmp64);
ad69471c 6245 } else {
a7812ae4 6246 /* BUGFIX */
ad69471c 6247 neon_load_reg64(cpu_V0, rn);
a7812ae4 6248 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6249 neon_load_reg64(cpu_V1, rm);
a7812ae4 6250 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6251 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6252 }
6253 neon_store_reg64(cpu_V0, rd);
6254 if (q) {
6255 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6256 }
6257 } else if ((insn & (1 << 11)) == 0) {
6258 /* Two register misc. */
6259 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6260 size = (insn >> 18) & 3;
600b828c
PM
6261 /* UNDEF for unknown op values and bad op-size combinations */
6262 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6263 return 1;
6264 }
fe8fcf3d
PM
6265 if (neon_2rm_is_v8_op(op) &&
6266 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6267 return 1;
6268 }
fc2a9b37
PM
6269 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6270 q && ((rm | rd) & 1)) {
6271 return 1;
6272 }
9ee6e8bb 6273 switch (op) {
600b828c 6274 case NEON_2RM_VREV64:
9ee6e8bb 6275 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6276 tmp = neon_load_reg(rm, pass * 2);
6277 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6278 switch (size) {
dd8fbd78
FN
6279 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6280 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6281 case 2: /* no-op */ break;
6282 default: abort();
6283 }
dd8fbd78 6284 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6285 if (size == 2) {
dd8fbd78 6286 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6287 } else {
9ee6e8bb 6288 switch (size) {
dd8fbd78
FN
6289 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6290 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6291 default: abort();
6292 }
dd8fbd78 6293 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6294 }
6295 }
6296 break;
600b828c
PM
6297 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6298 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6299 for (pass = 0; pass < q + 1; pass++) {
6300 tmp = neon_load_reg(rm, pass * 2);
6301 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6302 tmp = neon_load_reg(rm, pass * 2 + 1);
6303 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6304 switch (size) {
6305 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6306 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6307 case 2: tcg_gen_add_i64(CPU_V001); break;
6308 default: abort();
6309 }
600b828c 6310 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6311 /* Accumulate. */
ad69471c
PB
6312 neon_load_reg64(cpu_V1, rd + pass);
6313 gen_neon_addl(size);
9ee6e8bb 6314 }
ad69471c 6315 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6316 }
6317 break;
600b828c 6318 case NEON_2RM_VTRN:
9ee6e8bb 6319 if (size == 2) {
a5a14945 6320 int n;
9ee6e8bb 6321 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6322 tmp = neon_load_reg(rm, n);
6323 tmp2 = neon_load_reg(rd, n + 1);
6324 neon_store_reg(rm, n, tmp2);
6325 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6326 }
6327 } else {
6328 goto elementwise;
6329 }
6330 break;
600b828c 6331 case NEON_2RM_VUZP:
02acedf9 6332 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6333 return 1;
9ee6e8bb
PB
6334 }
6335 break;
600b828c 6336 case NEON_2RM_VZIP:
d68a6f3a 6337 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6338 return 1;
9ee6e8bb
PB
6339 }
6340 break;
600b828c
PM
6341 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6342 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6343 if (rm & 1) {
6344 return 1;
6345 }
f764718d 6346 tmp2 = NULL;
9ee6e8bb 6347 for (pass = 0; pass < 2; pass++) {
ad69471c 6348 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6349 tmp = tcg_temp_new_i32();
600b828c
PM
6350 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6351 tmp, cpu_V0);
ad69471c
PB
6352 if (pass == 0) {
6353 tmp2 = tmp;
6354 } else {
6355 neon_store_reg(rd, 0, tmp2);
6356 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6357 }
9ee6e8bb
PB
6358 }
6359 break;
600b828c 6360 case NEON_2RM_VSHLL:
fc2a9b37 6361 if (q || (rd & 1)) {
9ee6e8bb 6362 return 1;
600b828c 6363 }
ad69471c
PB
6364 tmp = neon_load_reg(rm, 0);
6365 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6366 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6367 if (pass == 1)
6368 tmp = tmp2;
6369 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6370 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6371 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6372 }
6373 break;
600b828c 6374 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
6375 {
6376 TCGv_ptr fpst;
6377 TCGv_i32 ahp;
6378
602f6e42 6379 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
6380 q || (rm & 1)) {
6381 return 1;
6382 }
486624fc
AB
6383 fpst = get_fpstatus_ptr(true);
6384 ahp = get_ahp_flag();
58f2682e
PM
6385 tmp = neon_load_reg(rm, 0);
6386 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6387 tmp2 = neon_load_reg(rm, 1);
6388 gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
60011498
PB
6389 tcg_gen_shli_i32(tmp2, tmp2, 16);
6390 tcg_gen_or_i32(tmp2, tmp2, tmp);
58f2682e
PM
6391 tcg_temp_free_i32(tmp);
6392 tmp = neon_load_reg(rm, 2);
6393 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6394 tmp3 = neon_load_reg(rm, 3);
60011498 6395 neon_store_reg(rd, 0, tmp2);
58f2682e
PM
6396 gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
6397 tcg_gen_shli_i32(tmp3, tmp3, 16);
6398 tcg_gen_or_i32(tmp3, tmp3, tmp);
6399 neon_store_reg(rd, 1, tmp3);
7d1b0095 6400 tcg_temp_free_i32(tmp);
486624fc
AB
6401 tcg_temp_free_i32(ahp);
6402 tcg_temp_free_ptr(fpst);
60011498 6403 break;
486624fc 6404 }
600b828c 6405 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
6406 {
6407 TCGv_ptr fpst;
6408 TCGv_i32 ahp;
602f6e42 6409 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
6410 q || (rd & 1)) {
6411 return 1;
6412 }
486624fc
AB
6413 fpst = get_fpstatus_ptr(true);
6414 ahp = get_ahp_flag();
7d1b0095 6415 tmp3 = tcg_temp_new_i32();
60011498
PB
6416 tmp = neon_load_reg(rm, 0);
6417 tmp2 = neon_load_reg(rm, 1);
6418 tcg_gen_ext16u_i32(tmp3, tmp);
b66f6b99
PM
6419 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6420 neon_store_reg(rd, 0, tmp3);
6421 tcg_gen_shri_i32(tmp, tmp, 16);
6422 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
6423 neon_store_reg(rd, 1, tmp);
6424 tmp3 = tcg_temp_new_i32();
60011498 6425 tcg_gen_ext16u_i32(tmp3, tmp2);
b66f6b99
PM
6426 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6427 neon_store_reg(rd, 2, tmp3);
6428 tcg_gen_shri_i32(tmp2, tmp2, 16);
6429 gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
6430 neon_store_reg(rd, 3, tmp2);
486624fc
AB
6431 tcg_temp_free_i32(ahp);
6432 tcg_temp_free_ptr(fpst);
60011498 6433 break;
486624fc 6434 }
9d935509 6435 case NEON_2RM_AESE: case NEON_2RM_AESMC:
962fcbf2 6436 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
9d935509
AB
6437 return 1;
6438 }
1a66ac61
RH
6439 ptr1 = vfp_reg_ptr(true, rd);
6440 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
6441
6442 /* Bit 6 is the lowest opcode bit; it distinguishes between
6443 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6444 */
6445 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6446
6447 if (op == NEON_2RM_AESE) {
1a66ac61 6448 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 6449 } else {
1a66ac61 6450 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 6451 }
1a66ac61
RH
6452 tcg_temp_free_ptr(ptr1);
6453 tcg_temp_free_ptr(ptr2);
9d935509
AB
6454 tcg_temp_free_i32(tmp3);
6455 break;
f1ecb913 6456 case NEON_2RM_SHA1H:
962fcbf2 6457 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
f1ecb913
AB
6458 return 1;
6459 }
1a66ac61
RH
6460 ptr1 = vfp_reg_ptr(true, rd);
6461 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 6462
1a66ac61 6463 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 6464
1a66ac61
RH
6465 tcg_temp_free_ptr(ptr1);
6466 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
6467 break;
6468 case NEON_2RM_SHA1SU1:
6469 if ((rm | rd) & 1) {
6470 return 1;
6471 }
6472 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6473 if (q) {
962fcbf2 6474 if (!dc_isar_feature(aa32_sha2, s)) {
f1ecb913
AB
6475 return 1;
6476 }
962fcbf2 6477 } else if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
6478 return 1;
6479 }
1a66ac61
RH
6480 ptr1 = vfp_reg_ptr(true, rd);
6481 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 6482 if (q) {
1a66ac61 6483 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 6484 } else {
1a66ac61 6485 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 6486 }
1a66ac61
RH
6487 tcg_temp_free_ptr(ptr1);
6488 tcg_temp_free_ptr(ptr2);
f1ecb913 6489 break;
4bf940be
RH
6490
6491 case NEON_2RM_VMVN:
6492 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
6493 break;
6494 case NEON_2RM_VNEG:
6495 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
6496 break;
4e027a71
RH
6497 case NEON_2RM_VABS:
6498 tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
6499 break;
4bf940be 6500
9ee6e8bb
PB
6501 default:
6502 elementwise:
6503 for (pass = 0; pass < (q ? 4 : 2); pass++) {
60737ed5 6504 tmp = neon_load_reg(rm, pass);
9ee6e8bb 6505 switch (op) {
600b828c 6506 case NEON_2RM_VREV32:
9ee6e8bb 6507 switch (size) {
dd8fbd78
FN
6508 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6509 case 1: gen_swap_half(tmp); break;
600b828c 6510 default: abort();
9ee6e8bb
PB
6511 }
6512 break;
600b828c 6513 case NEON_2RM_VREV16:
dd8fbd78 6514 gen_rev16(tmp);
9ee6e8bb 6515 break;
600b828c 6516 case NEON_2RM_VCLS:
9ee6e8bb 6517 switch (size) {
dd8fbd78
FN
6518 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6519 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6520 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6521 default: abort();
9ee6e8bb
PB
6522 }
6523 break;
600b828c 6524 case NEON_2RM_VCLZ:
9ee6e8bb 6525 switch (size) {
dd8fbd78
FN
6526 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6527 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 6528 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 6529 default: abort();
9ee6e8bb
PB
6530 }
6531 break;
600b828c 6532 case NEON_2RM_VCNT:
dd8fbd78 6533 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6534 break;
600b828c 6535 case NEON_2RM_VQABS:
9ee6e8bb 6536 switch (size) {
02da0b2d
PM
6537 case 0:
6538 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6539 break;
6540 case 1:
6541 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6542 break;
6543 case 2:
6544 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6545 break;
600b828c 6546 default: abort();
9ee6e8bb
PB
6547 }
6548 break;
600b828c 6549 case NEON_2RM_VQNEG:
9ee6e8bb 6550 switch (size) {
02da0b2d
PM
6551 case 0:
6552 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6553 break;
6554 case 1:
6555 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6556 break;
6557 case 2:
6558 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6559 break;
600b828c 6560 default: abort();
9ee6e8bb
PB
6561 }
6562 break;
600b828c 6563 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6564 tmp2 = tcg_const_i32(0);
9ee6e8bb 6565 switch(size) {
dd8fbd78
FN
6566 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6567 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6568 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6569 default: abort();
9ee6e8bb 6570 }
39d5492a 6571 tcg_temp_free_i32(tmp2);
600b828c 6572 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6573 tcg_gen_not_i32(tmp, tmp);
600b828c 6574 }
9ee6e8bb 6575 break;
600b828c 6576 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6577 tmp2 = tcg_const_i32(0);
9ee6e8bb 6578 switch(size) {
dd8fbd78
FN
6579 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6580 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6581 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6582 default: abort();
9ee6e8bb 6583 }
39d5492a 6584 tcg_temp_free_i32(tmp2);
600b828c 6585 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6586 tcg_gen_not_i32(tmp, tmp);
600b828c 6587 }
9ee6e8bb 6588 break;
600b828c 6589 case NEON_2RM_VCEQ0:
dd8fbd78 6590 tmp2 = tcg_const_i32(0);
9ee6e8bb 6591 switch(size) {
dd8fbd78
FN
6592 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6593 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6594 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6595 default: abort();
9ee6e8bb 6596 }
39d5492a 6597 tcg_temp_free_i32(tmp2);
9ee6e8bb 6598 break;
600b828c 6599 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6600 {
6601 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6602 tmp2 = tcg_const_i32(0);
aa47cfdd 6603 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6604 tcg_temp_free_i32(tmp2);
aa47cfdd 6605 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6606 break;
aa47cfdd 6607 }
600b828c 6608 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6609 {
6610 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6611 tmp2 = tcg_const_i32(0);
aa47cfdd 6612 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6613 tcg_temp_free_i32(tmp2);
aa47cfdd 6614 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6615 break;
aa47cfdd 6616 }
600b828c 6617 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6618 {
6619 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6620 tmp2 = tcg_const_i32(0);
aa47cfdd 6621 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6622 tcg_temp_free_i32(tmp2);
aa47cfdd 6623 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6624 break;
aa47cfdd 6625 }
600b828c 6626 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6627 {
6628 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6629 tmp2 = tcg_const_i32(0);
aa47cfdd 6630 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6631 tcg_temp_free_i32(tmp2);
aa47cfdd 6632 tcg_temp_free_ptr(fpstatus);
0e326109 6633 break;
aa47cfdd 6634 }
600b828c 6635 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6636 {
6637 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6638 tmp2 = tcg_const_i32(0);
aa47cfdd 6639 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6640 tcg_temp_free_i32(tmp2);
aa47cfdd 6641 tcg_temp_free_ptr(fpstatus);
0e326109 6642 break;
aa47cfdd 6643 }
600b828c 6644 case NEON_2RM_VABS_F:
fd8a68cd 6645 gen_helper_vfp_abss(tmp, tmp);
9ee6e8bb 6646 break;
600b828c 6647 case NEON_2RM_VNEG_F:
cedcc96f 6648 gen_helper_vfp_negs(tmp, tmp);
9ee6e8bb 6649 break;
600b828c 6650 case NEON_2RM_VSWP:
dd8fbd78
FN
6651 tmp2 = neon_load_reg(rd, pass);
6652 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6653 break;
600b828c 6654 case NEON_2RM_VTRN:
dd8fbd78 6655 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6656 switch (size) {
dd8fbd78
FN
6657 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6658 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6659 default: abort();
9ee6e8bb 6660 }
dd8fbd78 6661 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6662 break;
34f7b0a2
WN
6663 case NEON_2RM_VRINTN:
6664 case NEON_2RM_VRINTA:
6665 case NEON_2RM_VRINTM:
6666 case NEON_2RM_VRINTP:
6667 case NEON_2RM_VRINTZ:
6668 {
6669 TCGv_i32 tcg_rmode;
6670 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6671 int rmode;
6672
6673 if (op == NEON_2RM_VRINTZ) {
6674 rmode = FPROUNDING_ZERO;
6675 } else {
6676 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6677 }
6678
6679 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6680 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6681 cpu_env);
3b52ad1f 6682 gen_helper_rints(tmp, tmp, fpstatus);
34f7b0a2
WN
6683 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6684 cpu_env);
6685 tcg_temp_free_ptr(fpstatus);
6686 tcg_temp_free_i32(tcg_rmode);
6687 break;
6688 }
2ce70625
WN
6689 case NEON_2RM_VRINTX:
6690 {
6691 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
3b52ad1f 6692 gen_helper_rints_exact(tmp, tmp, fpstatus);
2ce70625
WN
6693 tcg_temp_free_ptr(fpstatus);
6694 break;
6695 }
901ad525
WN
6696 case NEON_2RM_VCVTAU:
6697 case NEON_2RM_VCVTAS:
6698 case NEON_2RM_VCVTNU:
6699 case NEON_2RM_VCVTNS:
6700 case NEON_2RM_VCVTPU:
6701 case NEON_2RM_VCVTPS:
6702 case NEON_2RM_VCVTMU:
6703 case NEON_2RM_VCVTMS:
6704 {
6705 bool is_signed = !extract32(insn, 7, 1);
6706 TCGv_ptr fpst = get_fpstatus_ptr(1);
6707 TCGv_i32 tcg_rmode, tcg_shift;
6708 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6709
6710 tcg_shift = tcg_const_i32(0);
6711 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6712 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6713 cpu_env);
6714
6715 if (is_signed) {
30bf0a01 6716 gen_helper_vfp_tosls(tmp, tmp,
901ad525
WN
6717 tcg_shift, fpst);
6718 } else {
30bf0a01 6719 gen_helper_vfp_touls(tmp, tmp,
901ad525
WN
6720 tcg_shift, fpst);
6721 }
6722
6723 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6724 cpu_env);
6725 tcg_temp_free_i32(tcg_rmode);
6726 tcg_temp_free_i32(tcg_shift);
6727 tcg_temp_free_ptr(fpst);
6728 break;
6729 }
600b828c 6730 case NEON_2RM_VRECPE:
b6d4443a
AB
6731 {
6732 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6733 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6734 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6735 break;
b6d4443a 6736 }
600b828c 6737 case NEON_2RM_VRSQRTE:
c2fb418e
AB
6738 {
6739 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6740 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6741 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6742 break;
c2fb418e 6743 }
600b828c 6744 case NEON_2RM_VRECPE_F:
b6d4443a
AB
6745 {
6746 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9a011fec 6747 gen_helper_recpe_f32(tmp, tmp, fpstatus);
b6d4443a 6748 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6749 break;
b6d4443a 6750 }
600b828c 6751 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
6752 {
6753 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9a011fec 6754 gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
c2fb418e 6755 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6756 break;
c2fb418e 6757 }
600b828c 6758 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
60737ed5
PM
6759 {
6760 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6761 gen_helper_vfp_sitos(tmp, tmp, fpstatus);
6762 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6763 break;
60737ed5 6764 }
600b828c 6765 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
60737ed5
PM
6766 {
6767 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6768 gen_helper_vfp_uitos(tmp, tmp, fpstatus);
6769 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6770 break;
60737ed5 6771 }
600b828c 6772 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
60737ed5
PM
6773 {
6774 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6775 gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
6776 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6777 break;
60737ed5 6778 }
600b828c 6779 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
60737ed5
PM
6780 {
6781 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6782 gen_helper_vfp_touizs(tmp, tmp, fpstatus);
6783 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6784 break;
60737ed5 6785 }
9ee6e8bb 6786 default:
600b828c
PM
6787 /* Reserved op values were caught by the
6788 * neon_2rm_sizes[] check earlier.
6789 */
6790 abort();
9ee6e8bb 6791 }
60737ed5 6792 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6793 }
6794 break;
6795 }
6796 } else if ((insn & (1 << 10)) == 0) {
6797 /* VTBL, VTBX. */
56907d77
PM
6798 int n = ((insn >> 8) & 3) + 1;
6799 if ((rn + n) > 32) {
6800 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6801 * helper function running off the end of the register file.
6802 */
6803 return 1;
6804 }
6805 n <<= 3;
9ee6e8bb 6806 if (insn & (1 << 6)) {
8f8e3aa4 6807 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6808 } else {
7d1b0095 6809 tmp = tcg_temp_new_i32();
8f8e3aa4 6810 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6811 }
8f8e3aa4 6812 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 6813 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 6814 tmp5 = tcg_const_i32(n);
e7c06c4e 6815 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 6816 tcg_temp_free_i32(tmp);
9ee6e8bb 6817 if (insn & (1 << 6)) {
8f8e3aa4 6818 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6819 } else {
7d1b0095 6820 tmp = tcg_temp_new_i32();
8f8e3aa4 6821 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6822 }
8f8e3aa4 6823 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 6824 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 6825 tcg_temp_free_i32(tmp5);
e7c06c4e 6826 tcg_temp_free_ptr(ptr1);
8f8e3aa4 6827 neon_store_reg(rd, 0, tmp2);
3018f259 6828 neon_store_reg(rd, 1, tmp3);
7d1b0095 6829 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6830 } else if ((insn & 0x380) == 0) {
6831 /* VDUP */
32f91fb7
RH
6832 int element;
6833 TCGMemOp size;
6834
133da6aa
JR
6835 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6836 return 1;
6837 }
9ee6e8bb 6838 if (insn & (1 << 16)) {
32f91fb7
RH
6839 size = MO_8;
6840 element = (insn >> 17) & 7;
9ee6e8bb 6841 } else if (insn & (1 << 17)) {
32f91fb7
RH
6842 size = MO_16;
6843 element = (insn >> 18) & 3;
6844 } else {
6845 size = MO_32;
6846 element = (insn >> 19) & 1;
9ee6e8bb 6847 }
32f91fb7
RH
6848 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
6849 neon_element_offset(rm, element, size),
6850 q ? 16 : 8, q ? 16 : 8);
9ee6e8bb
PB
6851 } else {
6852 return 1;
6853 }
6854 }
6855 }
6856 return 0;
6857}
6858
/* Advanced SIMD three registers of the same length extension.
 *  31  25    23  22    20   16   12  11   10   9    8        3     0
 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
 *
 * Decodes VCMLA, VCADD, V[US]DOT and VFM[AS]L.  Returns 0 if the insn
 * was handled (including the fp-disabled trap case), 1 to signal that
 * the caller should raise UNDEF.
 */
static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
{
    /* Exactly one of fn_gvec / fn_gvec_ptr is set by the decode below:
     * fn_gvec_ptr is used for ops needing an extra pointer argument
     * (fpstatus, or cpu_env when ptr_is_env is set), fn_gvec otherwise.
     */
    gen_helper_gvec_3 *fn_gvec = NULL;
    gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
    int rd, rn, rm, opr_sz;
    int data = 0;               /* immediate passed through to the helper */
    int off_rn, off_rm;
    bool is_long = false, q = extract32(insn, 6, 1);
    bool ptr_is_env = false;    /* pass cpu_env rather than an fpstatus ptr */

    if ((insn & 0xfe200f10) == 0xfc200800) {
        /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
        int size = extract32(insn, 20, 1);
        data = extract32(insn, 23, 2); /* rot */
        if (!dc_isar_feature(aa32_vcma, s)
            || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
            return 1;
        }
        fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
    } else if ((insn & 0xfea00f10) == 0xfc800800) {
        /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
        int size = extract32(insn, 20, 1);
        data = extract32(insn, 24, 1); /* rot */
        if (!dc_isar_feature(aa32_vcma, s)
            || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
            return 1;
        }
        fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
    } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
        /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
        bool u = extract32(insn, 4, 1);
        if (!dc_isar_feature(aa32_dp, s)) {
            return 1;
        }
        fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
    } else if ((insn & 0xff300f10) == 0xfc200810) {
        /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
        int is_s = extract32(insn, 23, 1);
        if (!dc_isar_feature(aa32_fhm, s)) {
            return 1;
        }
        is_long = true;         /* sources are half-width (S regs when !q) */
        data = is_s; /* is_2 == 0 */
        fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
        ptr_is_env = true;
    } else {
        return 1;
    }

    VFP_DREG_D(rd, insn);
    if (rd & q) {
        /* Q-sized ops require an even D-register number: UNDEF otherwise */
        return 1;
    }
    if (q || !is_long) {
        /* Sources are D registers. */
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
        /* q and !is_long are 0/1, so this bitwise test means
         * "odd rn or rm, with q set, for a non-long op" => UNDEF.
         */
        if ((rn | rm) & q & !is_long) {
            return 1;
        }
        off_rn = vfp_reg_offset(1, rn);
        off_rm = vfp_reg_offset(1, rm);
    } else {
        /* Long op with q == 0: sources are S registers. */
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
        off_rn = vfp_reg_offset(0, rn);
        off_rm = vfp_reg_offset(0, rm);
    }

    /* FP trap check comes after the decode/UNDEF checks above, so an
     * invalid encoding UNDEFs rather than trapping to fp_excp_el.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
    if (!s->vfp_enabled) {
        return 1;
    }

    opr_sz = (1 + q) * 8;       /* bytes: 8 for D-sized, 16 for Q-sized */
    if (fn_gvec_ptr) {
        TCGv_ptr ptr;
        if (ptr_is_env) {
            ptr = cpu_env;
        } else {
            ptr = get_fpstatus_ptr(1);
        }
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
                           opr_sz, opr_sz, data, fn_gvec_ptr);
        if (!ptr_is_env) {
            /* cpu_env is a global TCG value and must not be freed */
            tcg_temp_free_ptr(ptr);
        }
    } else {
        tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
                           opr_sz, opr_sz, data, fn_gvec);
    }
    return 0;
}
6961
638808ff
RH
/* Advanced SIMD two registers and a scalar extension.
 *  31             24   23  22   20   16   12  11   10   9    8        3     0
 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
 *
 */

/*
 * Decode and generate code for the "two registers and a scalar"
 * extension group: VCMLA (indexed), V[US]DOT (indexed) and VFM[AS]L
 * (indexed).  Returns 0 if the insn was handled (including the case
 * where we generated an fp-access trap), or 1 if the caller should
 * treat it as an illegal opcode (UNDEF).
 */
static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
{
    gen_helper_gvec_3 *fn_gvec = NULL;
    gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
    int rd, rn, rm, opr_sz, data;
    int off_rn, off_rm;
    /* Q bit: 0 for 64-bit (doubleword) operation, 1 for 128-bit (quad) */
    bool is_long = false, q = extract32(insn, 6, 1);
    bool ptr_is_env = false;

    if ((insn & 0xff000f10) == 0xfe000800) {
        /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
        int rot = extract32(insn, 20, 2);
        int size = extract32(insn, 23, 1);
        int index;

        if (!dc_isar_feature(aa32_vcma, s)) {
            return 1;
        }
        if (size == 0) {
            if (!dc_isar_feature(aa32_fp16_arith, s)) {
                return 1;
            }
            /* For fp16, rm is just Vm, and index is M. */
            rm = extract32(insn, 0, 4);
            index = extract32(insn, 5, 1);
        } else {
            /* For fp32, rm is the usual M:Vm, and index is 0. */
            VFP_DREG_M(rm, insn);
            index = 0;
        }
        /* Pack the element index and rotation for the helper. */
        data = (index << 2) | rot;
        fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
                       : gen_helper_gvec_fcmlah_idx);
    } else if ((insn & 0xffb00f00) == 0xfe200d00) {
        /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
        int u = extract32(insn, 4, 1);

        if (!dc_isar_feature(aa32_dp, s)) {
            return 1;
        }
        fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
        /* rm is just Vm, and index is M. */
        data = extract32(insn, 5, 1); /* index */
        rm = extract32(insn, 0, 4);
    } else if ((insn & 0xffa00f10) == 0xfe000810) {
        /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
        int is_s = extract32(insn, 20, 1);
        int vm20 = extract32(insn, 0, 3);
        int vm3 = extract32(insn, 3, 1);
        int m = extract32(insn, 5, 1);
        int index;

        if (!dc_isar_feature(aa32_fhm, s)) {
            return 1;
        }
        /* The scalar register number and element index are assembled
         * differently for the Q and non-Q forms.
         */
        if (q) {
            rm = vm20;
            index = m * 2 + vm3;
        } else {
            rm = vm20 * 2 + m;
            index = vm3;
        }
        /* Widening (fp16 -> fp32) operation: sources may be S regs. */
        is_long = true;
        data = (index << 2) | is_s; /* is_2 == 0 */
        fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
        ptr_is_env = true;
    } else {
        return 1;
    }

    VFP_DREG_D(rd, insn);
    /* Q ops require an even destination register number. */
    if (rd & q) {
        return 1;
    }
    if (q || !is_long) {
        VFP_DREG_N(rn, insn);
        /* NOTE(review): bitwise mix of a reg number with bools; appears
         * to UNDEF an odd Vn for non-long Q ops only -- confirm against
         * the ARM ARM constraint for this encoding group.
         */
        if (rn & q & !is_long) {
            return 1;
        }
        off_rn = vfp_reg_offset(1, rn);
        off_rm = vfp_reg_offset(1, rm);
    } else {
        /* Non-Q long ops take single-precision source registers. */
        rn = VFP_SREG_N(insn);
        off_rn = vfp_reg_offset(0, rn);
        off_rm = vfp_reg_offset(0, rm);
    }
    /* FP/SIMD accesses may be disabled at this EL; trap if so. */
    if (s->fp_excp_el) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
    if (!s->vfp_enabled) {
        return 1;
    }

    /* Operation size in bytes: 8 for D regs, 16 for Q regs. */
    opr_sz = (1 + q) * 8;
    if (fn_gvec_ptr) {
        /* Helpers that need fpstatus (or cpu_env for fmlal) take a ptr arg. */
        TCGv_ptr ptr;
        if (ptr_is_env) {
            ptr = cpu_env;
        } else {
            ptr = get_fpstatus_ptr(1);
        }
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
                           opr_sz, opr_sz, data, fn_gvec_ptr);
        if (!ptr_is_env) {
            tcg_temp_free_ptr(ptr);
        }
    } else {
        tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
                           opr_sz, opr_sz, data, fn_gvec);
    }
    return 0;
}
7084
/*
 * Decode and generate code for a coprocessor instruction
 * (MRC/MCR/MRRC/MCRR and the XScale/iwMMXt spaces).
 * Returns 0 if the insn was handled, 1 if it should UNDEF.
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* c15_cpar gates per-coprocessor access on XScale. */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the register-access fields; the 64-bit (MRRC/MCRR) form
     * has no crn/opc2 and carries a second transfer register rt2.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    /* Look up the register in the cpreg hashtable for this CPU. */
    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* Sync condexec bits and PC so the helper can raise
             * an exception with correct state if the check fails.
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc_curr);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        } else if (ri->type & ARM_CP_RAISES_EXC) {
            /*
             * The readfn or writefn might raise an exception;
             * synchronize the CPU state in case it does.
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc_curr);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->base.pc_next);
            s->base.is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* I/O registers need the icount window opened while they execute. */
        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                /* Value comes from a constant, a readfn helper call,
                 * or a direct load from the CPUState field.
                 */
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value into the rt/rt2 register pair. */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrh_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                /* Assemble rt:rt2 into a 64-bit value, then write via
                 * the writefn helper or straight into the state field.
                 */
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7341
5e3f878a
PB
7342
7343/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7344static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7345{
39d5492a 7346 TCGv_i32 tmp;
7d1b0095 7347 tmp = tcg_temp_new_i32();
ecc7b3aa 7348 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7349 store_reg(s, rlow, tmp);
7d1b0095 7350 tmp = tcg_temp_new_i32();
664b7e3b 7351 tcg_gen_extrh_i64_i32(tmp, val);
5e3f878a
PB
7352 store_reg(s, rhigh, tmp);
7353}
7354
7355/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7356static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7357{
a7812ae4 7358 TCGv_i64 tmp;
39d5492a 7359 TCGv_i32 tmp2;
5e3f878a 7360
36aa55dc 7361 /* Load value and extend to 64 bits. */
a7812ae4 7362 tmp = tcg_temp_new_i64();
5e3f878a
PB
7363 tmp2 = load_reg(s, rlow);
7364 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7365 tcg_temp_free_i32(tmp2);
5e3f878a 7366 tcg_gen_add_i64(val, val, tmp);
b75263d6 7367 tcg_temp_free_i64(tmp);
5e3f878a
PB
7368}
7369
7370/* load and add a 64-bit value from a register pair. */
a7812ae4 7371static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7372{
a7812ae4 7373 TCGv_i64 tmp;
39d5492a
PM
7374 TCGv_i32 tmpl;
7375 TCGv_i32 tmph;
5e3f878a
PB
7376
7377 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7378 tmpl = load_reg(s, rlow);
7379 tmph = load_reg(s, rhigh);
a7812ae4 7380 tmp = tcg_temp_new_i64();
36aa55dc 7381 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7382 tcg_temp_free_i32(tmpl);
7383 tcg_temp_free_i32(tmph);
5e3f878a 7384 tcg_gen_add_i64(val, val, tmp);
b75263d6 7385 tcg_temp_free_i64(tmp);
5e3f878a
PB
7386}
7387
c9f10124 7388/* Set N and Z flags from hi|lo. */
39d5492a 7389static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7390{
c9f10124
RH
7391 tcg_gen_mov_i32(cpu_NF, hi);
7392 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7393}
7394
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.  The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually.  */
/*
 * gen_load_exclusive: generate code for an LDREX-family load.
 * @rt: first destination register
 * @rt2: second destination register (used only when size == 3)
 * @addr: 32-bit virtual address temporary
 * @size: log2 of the access size; 3 means a 64-bit (register-pair) access
 *
 * Records the address and loaded value in cpu_exclusive_addr /
 * cpu_exclusive_val for the matching store-exclusive to check.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. That means we don't want to do a
         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
         * for an architecturally 64-bit access, but instead do a
         * 64-bit access using MO_BE if appropriate and then split
         * the two halves.
         * This only makes a difference for BE32 user-mode, where
         * frob64() must not flip the two halves of the 64-bit data
         * but this code must treat BE32 user-mode like BE32 system.
         */
        TCGv taddr = gen_aa32_addr(s, addr, opc);

        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
        tcg_temp_free(taddr);
        /* Remember the full 64-bit value for the store-exclusive check. */
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        /* Split halves so Rt always gets the word at the lower address. */
        if (s->be_data == MO_BE) {
            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
        } else {
            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        }
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Mark the monitored address (zero-extended to 64 bits). */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7445
/* Clear the exclusive monitor (CLREX).  Setting cpu_exclusive_addr to -1
 * means the address comparison in gen_store_exclusive() can never match,
 * since monitored addresses are zero-extended 32-bit values.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7450
/*
 * gen_store_exclusive: generate code for a STREX-family store.
 * @rd: register receiving the status result (0 = stored, 1 = failed)
 * @rt: first source register
 * @rt2: second source register (used only when size == 3)
 * @addr: 32-bit virtual address temporary
 * @size: log2 of the access size; 3 means a 64-bit (register-pair) access
 *
 * The address check against cpu_exclusive_addr is done explicitly;
 * the value check and store are fused into one atomic cmpxchg.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Address check: compare the zero-extended address with the monitor. */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. Since we're going to treat this as a
         * single 64-bit BE store, we need to put the two halves in the
         * opposite order for BE to LE, so that they end up in the right
         * places.
         * We don't want gen_aa32_frob64() because that does the wrong
         * thing for BE32 usermode.
         */
        if (s->be_data == MO_BE) {
            tcg_gen_concat_i32_i64(n64, t2, t1);
        } else {
            tcg_gen_concat_i32_i64(n64, t1, t2);
        }
        tcg_temp_free_i32(t2);

        /* Store n64 only if memory still holds the remembered value. */
        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        /* Status: 0 if the cmpxchg matched (store happened), else 1. */
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way, the exclusive monitor is now cleared. */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
426f5abc 7524
81465888
PM
/* gen_srs:
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn: stores LR and
 * SPSR to the banked stack of the specified mode, with optional
 * writeback of the banked SP.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        /* Trap to EL3 (target_el == 3). */
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    /* Validate the requested mode number. */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        unallocated_encoding(s);
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset of the first stored word relative to the banked SP. */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR, then SPSR at addr + 4. */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Adjustment from the second store address back to the
         * final SP value, per addressing mode.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    /* CPU state (PC/condexec) was synced above; end the TB. */
    s->base.is_jmp = DISAS_UPDATE;
}
7650
c2d9644e
RK
7651/* Generate a label used for skipping this instruction */
7652static void arm_gen_condlabel(DisasContext *s)
7653{
7654 if (!s->condjmp) {
7655 s->condlabel = gen_new_label();
7656 s->condjmp = 1;
7657 }
7658}
7659
7660/* Skip this instruction if the ARM condition is false */
7661static void arm_skip_unless(DisasContext *s, uint32_t cond)
7662{
7663 arm_gen_condlabel(s);
7664 arm_gen_test_cc(cond ^ 1, s->condlabel);
7665}
7666
f4df2210 7667static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 7668{
f4df2210 7669 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7670 TCGv_i32 tmp;
7671 TCGv_i32 tmp2;
7672 TCGv_i32 tmp3;
7673 TCGv_i32 addr;
a7812ae4 7674 TCGv_i64 tmp64;
9ee6e8bb 7675
e13886e3
PM
7676 /* M variants do not implement ARM mode; this must raise the INVSTATE
7677 * UsageFault exception.
7678 */
b53d8923 7679 if (arm_dc_feature(s, ARM_FEATURE_M)) {
a767fac8 7680 gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
e13886e3
PM
7681 default_exception_el(s));
7682 return;
b53d8923 7683 }
9ee6e8bb
PB
7684 cond = insn >> 28;
7685 if (cond == 0xf){
be5e7a76
DES
7686 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7687 * choose to UNDEF. In ARMv5 and above the space is used
7688 * for miscellaneous unconditional instructions.
7689 */
7690 ARCH(5);
7691
9ee6e8bb
PB
7692 /* Unconditional instructions. */
7693 if (((insn >> 25) & 7) == 1) {
7694 /* NEON Data processing. */
d614a513 7695 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7696 goto illegal_op;
d614a513 7697 }
9ee6e8bb 7698
7dcc1f89 7699 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 7700 goto illegal_op;
7dcc1f89 7701 }
9ee6e8bb
PB
7702 return;
7703 }
7704 if ((insn & 0x0f100000) == 0x04000000) {
7705 /* NEON load/store. */
d614a513 7706 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7707 goto illegal_op;
d614a513 7708 }
9ee6e8bb 7709
7dcc1f89 7710 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 7711 goto illegal_op;
7dcc1f89 7712 }
9ee6e8bb
PB
7713 return;
7714 }
6a57f3eb
WN
7715 if ((insn & 0x0f000e10) == 0x0e000a00) {
7716 /* VFP. */
7dcc1f89 7717 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
7718 goto illegal_op;
7719 }
7720 return;
7721 }
3d185e5d
PM
7722 if (((insn & 0x0f30f000) == 0x0510f000) ||
7723 ((insn & 0x0f30f010) == 0x0710f000)) {
7724 if ((insn & (1 << 22)) == 0) {
7725 /* PLDW; v7MP */
d614a513 7726 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
7727 goto illegal_op;
7728 }
7729 }
7730 /* Otherwise PLD; v5TE+ */
be5e7a76 7731 ARCH(5TE);
3d185e5d
PM
7732 return;
7733 }
7734 if (((insn & 0x0f70f000) == 0x0450f000) ||
7735 ((insn & 0x0f70f010) == 0x0650f000)) {
7736 ARCH(7);
7737 return; /* PLI; V7 */
7738 }
7739 if (((insn & 0x0f700000) == 0x04100000) ||
7740 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 7741 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
7742 goto illegal_op;
7743 }
7744 return; /* v7MP: Unallocated memory hint: must NOP */
7745 }
7746
7747 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
7748 ARCH(6);
7749 /* setend */
9886ecdf
PB
7750 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
7751 gen_helper_setend(cpu_env);
dcba3a8d 7752 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
7753 }
7754 return;
7755 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7756 switch ((insn >> 4) & 0xf) {
7757 case 1: /* clrex */
7758 ARCH(6K);
426f5abc 7759 gen_clrex(s);
9ee6e8bb
PB
7760 return;
7761 case 4: /* dsb */
7762 case 5: /* dmb */
9ee6e8bb 7763 ARCH(7);
61e4c432 7764 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 7765 return;
6df99dec
SS
7766 case 6: /* isb */
7767 /* We need to break the TB after this insn to execute
7768 * self-modifying code correctly and also to take
7769 * any pending interrupts immediately.
7770 */
a0415916 7771 gen_goto_tb(s, 0, s->base.pc_next);
6df99dec 7772 return;
9888bd1e
RH
7773 case 7: /* sb */
7774 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
7775 goto illegal_op;
7776 }
7777 /*
7778 * TODO: There is no speculation barrier opcode
7779 * for TCG; MB and end the TB instead.
7780 */
7781 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
a0415916 7782 gen_goto_tb(s, 0, s->base.pc_next);
9888bd1e 7783 return;
9ee6e8bb
PB
7784 default:
7785 goto illegal_op;
7786 }
7787 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7788 /* srs */
81465888
PM
7789 ARCH(6);
7790 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7791 return;
ea825eee 7792 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7793 /* rfe */
c67b6b71 7794 int32_t offset;
9ee6e8bb
PB
7795 if (IS_USER(s))
7796 goto illegal_op;
7797 ARCH(6);
7798 rn = (insn >> 16) & 0xf;
b0109805 7799 addr = load_reg(s, rn);
9ee6e8bb
PB
7800 i = (insn >> 23) & 3;
7801 switch (i) {
b0109805 7802 case 0: offset = -4; break; /* DA */
c67b6b71
FN
7803 case 1: offset = 0; break; /* IA */
7804 case 2: offset = -8; break; /* DB */
b0109805 7805 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
7806 default: abort();
7807 }
7808 if (offset)
b0109805
PB
7809 tcg_gen_addi_i32(addr, addr, offset);
7810 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7811 tmp = tcg_temp_new_i32();
12dcc321 7812 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 7813 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7814 tmp2 = tcg_temp_new_i32();
12dcc321 7815 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
7816 if (insn & (1 << 21)) {
7817 /* Base writeback. */
7818 switch (i) {
b0109805 7819 case 0: offset = -8; break;
c67b6b71
FN
7820 case 1: offset = 4; break;
7821 case 2: offset = -4; break;
b0109805 7822 case 3: offset = 0; break;
9ee6e8bb
PB
7823 default: abort();
7824 }
7825 if (offset)
b0109805
PB
7826 tcg_gen_addi_i32(addr, addr, offset);
7827 store_reg(s, rn, addr);
7828 } else {
7d1b0095 7829 tcg_temp_free_i32(addr);
9ee6e8bb 7830 }
b0109805 7831 gen_rfe(s, tmp, tmp2);
c67b6b71 7832 return;
9ee6e8bb
PB
7833 } else if ((insn & 0x0e000000) == 0x0a000000) {
7834 /* branch link and change to thumb (blx <offset>) */
7835 int32_t offset;
7836
7d1b0095 7837 tmp = tcg_temp_new_i32();
a0415916 7838 tcg_gen_movi_i32(tmp, s->base.pc_next);
d9ba4830 7839 store_reg(s, 14, tmp);
9ee6e8bb
PB
7840 /* Sign-extend the 24-bit offset */
7841 offset = (((int32_t)insn) << 8) >> 8;
fdbcf632 7842 val = read_pc(s);
9ee6e8bb
PB
7843 /* offset * 4 + bit24 * 2 + (thumb bit) */
7844 val += (offset << 2) | ((insn >> 23) & 2) | 1;
be5e7a76 7845 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7846 gen_bx_im(s, val);
9ee6e8bb
PB
7847 return;
7848 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 7849 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 7850 /* iWMMXt register transfer. */
c0f4af17 7851 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 7852 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 7853 return;
c0f4af17
PM
7854 }
7855 }
9ee6e8bb 7856 }
8b7209fa
RH
7857 } else if ((insn & 0x0e000a00) == 0x0c000800
7858 && arm_dc_feature(s, ARM_FEATURE_V8)) {
7859 if (disas_neon_insn_3same_ext(s, insn)) {
7860 goto illegal_op;
7861 }
7862 return;
638808ff
RH
7863 } else if ((insn & 0x0f000a00) == 0x0e000800
7864 && arm_dc_feature(s, ARM_FEATURE_V8)) {
7865 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
7866 goto illegal_op;
7867 }
7868 return;
9ee6e8bb
PB
7869 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7870 /* Coprocessor double register transfer. */
be5e7a76 7871 ARCH(5TE);
9ee6e8bb
PB
7872 } else if ((insn & 0x0f000010) == 0x0e000010) {
7873 /* Additional coprocessor register transfer. */
7997d92f 7874 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
7875 uint32_t mask;
7876 uint32_t val;
7877 /* cps (privileged) */
7878 if (IS_USER(s))
7879 return;
7880 mask = val = 0;
7881 if (insn & (1 << 19)) {
7882 if (insn & (1 << 8))
7883 mask |= CPSR_A;
7884 if (insn & (1 << 7))
7885 mask |= CPSR_I;
7886 if (insn & (1 << 6))
7887 mask |= CPSR_F;
7888 if (insn & (1 << 18))
7889 val |= mask;
7890 }
7997d92f 7891 if (insn & (1 << 17)) {
9ee6e8bb
PB
7892 mask |= CPSR_M;
7893 val |= (insn & 0x1f);
7894 }
7895 if (mask) {
2fbac54b 7896 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
7897 }
7898 return;
7899 }
7900 goto illegal_op;
7901 }
7902 if (cond != 0xe) {
7903 /* if not always execute, we generate a conditional jump to
7904 next instruction */
c2d9644e 7905 arm_skip_unless(s, cond);
9ee6e8bb
PB
7906 }
7907 if ((insn & 0x0f900000) == 0x03000000) {
7908 if ((insn & (1 << 21)) == 0) {
7909 ARCH(6T2);
7910 rd = (insn >> 12) & 0xf;
7911 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7912 if ((insn & (1 << 22)) == 0) {
7913 /* MOVW */
7d1b0095 7914 tmp = tcg_temp_new_i32();
5e3f878a 7915 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
7916 } else {
7917 /* MOVT */
5e3f878a 7918 tmp = load_reg(s, rd);
86831435 7919 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7920 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7921 }
5e3f878a 7922 store_reg(s, rd, tmp);
9ee6e8bb
PB
7923 } else {
7924 if (((insn >> 12) & 0xf) != 0xf)
7925 goto illegal_op;
7926 if (((insn >> 16) & 0xf) == 0) {
7927 gen_nop_hint(s, insn & 0xff);
7928 } else {
7929 /* CPSR = immediate */
7930 val = insn & 0xff;
7931 shift = ((insn >> 8) & 0xf) * 2;
dd861b3f 7932 val = ror32(val, shift);
9ee6e8bb 7933 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
7934 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7935 i, val)) {
9ee6e8bb 7936 goto illegal_op;
7dcc1f89 7937 }
9ee6e8bb
PB
7938 }
7939 }
7940 } else if ((insn & 0x0f900000) == 0x01000000
7941 && (insn & 0x00000090) != 0x00000090) {
7942 /* miscellaneous instructions */
7943 op1 = (insn >> 21) & 3;
7944 sh = (insn >> 4) & 0xf;
7945 rm = insn & 0xf;
7946 switch (sh) {
8bfd0550
PM
7947 case 0x0: /* MSR, MRS */
7948 if (insn & (1 << 9)) {
7949 /* MSR (banked) and MRS (banked) */
7950 int sysm = extract32(insn, 16, 4) |
7951 (extract32(insn, 8, 1) << 4);
7952 int r = extract32(insn, 22, 1);
7953
7954 if (op1 & 1) {
7955 /* MSR (banked) */
7956 gen_msr_banked(s, r, sysm, rm);
7957 } else {
7958 /* MRS (banked) */
7959 int rd = extract32(insn, 12, 4);
7960
7961 gen_mrs_banked(s, r, sysm, rd);
7962 }
7963 break;
7964 }
7965
7966 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
7967 if (op1 & 1) {
7968 /* PSR = reg */
2fbac54b 7969 tmp = load_reg(s, rm);
9ee6e8bb 7970 i = ((op1 & 2) != 0);
7dcc1f89 7971 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
7972 goto illegal_op;
7973 } else {
7974 /* reg = PSR */
7975 rd = (insn >> 12) & 0xf;
7976 if (op1 & 2) {
7977 if (IS_USER(s))
7978 goto illegal_op;
d9ba4830 7979 tmp = load_cpu_field(spsr);
9ee6e8bb 7980 } else {
7d1b0095 7981 tmp = tcg_temp_new_i32();
9ef39277 7982 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 7983 }
d9ba4830 7984 store_reg(s, rd, tmp);
9ee6e8bb
PB
7985 }
7986 break;
7987 case 0x1:
7988 if (op1 == 1) {
7989 /* branch/exchange thumb (bx). */
be5e7a76 7990 ARCH(4T);
d9ba4830
PB
7991 tmp = load_reg(s, rm);
7992 gen_bx(s, tmp);
9ee6e8bb
PB
7993 } else if (op1 == 3) {
7994 /* clz */
be5e7a76 7995 ARCH(5);
9ee6e8bb 7996 rd = (insn >> 12) & 0xf;
1497c961 7997 tmp = load_reg(s, rm);
7539a012 7998 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 7999 store_reg(s, rd, tmp);
9ee6e8bb
PB
8000 } else {
8001 goto illegal_op;
8002 }
8003 break;
8004 case 0x2:
8005 if (op1 == 1) {
8006 ARCH(5J); /* bxj */
8007 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8008 tmp = load_reg(s, rm);
8009 gen_bx(s, tmp);
9ee6e8bb
PB
8010 } else {
8011 goto illegal_op;
8012 }
8013 break;
8014 case 0x3:
8015 if (op1 != 1)
8016 goto illegal_op;
8017
be5e7a76 8018 ARCH(5);
9ee6e8bb 8019 /* branch link/exchange thumb (blx) */
d9ba4830 8020 tmp = load_reg(s, rm);
7d1b0095 8021 tmp2 = tcg_temp_new_i32();
a0415916 8022 tcg_gen_movi_i32(tmp2, s->base.pc_next);
d9ba4830
PB
8023 store_reg(s, 14, tmp2);
8024 gen_bx(s, tmp);
9ee6e8bb 8025 break;
eb0ecd5a
WN
8026 case 0x4:
8027 {
8028 /* crc32/crc32c */
8029 uint32_t c = extract32(insn, 8, 4);
8030
8031 /* Check this CPU supports ARMv8 CRC instructions.
8032 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8033 * Bits 8, 10 and 11 should be zero.
8034 */
962fcbf2 8035 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
eb0ecd5a
WN
8036 goto illegal_op;
8037 }
8038
8039 rn = extract32(insn, 16, 4);
8040 rd = extract32(insn, 12, 4);
8041
8042 tmp = load_reg(s, rn);
8043 tmp2 = load_reg(s, rm);
aa633469
PM
8044 if (op1 == 0) {
8045 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8046 } else if (op1 == 1) {
8047 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8048 }
eb0ecd5a
WN
8049 tmp3 = tcg_const_i32(1 << op1);
8050 if (c & 0x2) {
8051 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8052 } else {
8053 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8054 }
8055 tcg_temp_free_i32(tmp2);
8056 tcg_temp_free_i32(tmp3);
8057 store_reg(s, rd, tmp);
8058 break;
8059 }
9ee6e8bb 8060 case 0x5: /* saturating add/subtract */
be5e7a76 8061 ARCH(5TE);
9ee6e8bb
PB
8062 rd = (insn >> 12) & 0xf;
8063 rn = (insn >> 16) & 0xf;
b40d0353 8064 tmp = load_reg(s, rm);
5e3f878a 8065 tmp2 = load_reg(s, rn);
9ee6e8bb 8066 if (op1 & 2)
640581a0 8067 gen_helper_add_saturate(tmp2, cpu_env, tmp2, tmp2);
9ee6e8bb 8068 if (op1 & 1)
9ef39277 8069 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8070 else
9ef39277 8071 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8072 tcg_temp_free_i32(tmp2);
5e3f878a 8073 store_reg(s, rd, tmp);
9ee6e8bb 8074 break;
55c544ed
PM
8075 case 0x6: /* ERET */
8076 if (op1 != 3) {
8077 goto illegal_op;
8078 }
8079 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
8080 goto illegal_op;
8081 }
8082 if ((insn & 0x000fff0f) != 0x0000000e) {
8083 /* UNPREDICTABLE; we choose to UNDEF */
8084 goto illegal_op;
8085 }
8086
8087 if (s->current_el == 2) {
8088 tmp = load_cpu_field(elr_el[2]);
8089 } else {
8090 tmp = load_reg(s, 14);
8091 }
8092 gen_exception_return(s, tmp);
8093 break;
49e14940 8094 case 7:
d4a2dc67
PM
8095 {
8096 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8097 switch (op1) {
19a6e31c
PM
8098 case 0:
8099 /* HLT */
8100 gen_hlt(s, imm16);
8101 break;
37e6456e
PM
8102 case 1:
8103 /* bkpt */
8104 ARCH(5);
06bcbda3 8105 gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm16, false));
37e6456e
PM
8106 break;
8107 case 2:
8108 /* Hypervisor call (v7) */
8109 ARCH(7);
8110 if (IS_USER(s)) {
8111 goto illegal_op;
8112 }
8113 gen_hvc(s, imm16);
8114 break;
8115 case 3:
8116 /* Secure monitor call (v6+) */
8117 ARCH(6K);
8118 if (IS_USER(s)) {
8119 goto illegal_op;
8120 }
8121 gen_smc(s);
8122 break;
8123 default:
19a6e31c 8124 g_assert_not_reached();
49e14940 8125 }
9ee6e8bb 8126 break;
d4a2dc67 8127 }
9ee6e8bb
PB
8128 case 0x8: /* signed multiply */
8129 case 0xa:
8130 case 0xc:
8131 case 0xe:
be5e7a76 8132 ARCH(5TE);
9ee6e8bb
PB
8133 rs = (insn >> 8) & 0xf;
8134 rn = (insn >> 12) & 0xf;
8135 rd = (insn >> 16) & 0xf;
8136 if (op1 == 1) {
8137 /* (32 * 16) >> 16 */
5e3f878a
PB
8138 tmp = load_reg(s, rm);
8139 tmp2 = load_reg(s, rs);
9ee6e8bb 8140 if (sh & 4)
5e3f878a 8141 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8142 else
5e3f878a 8143 gen_sxth(tmp2);
a7812ae4
PB
8144 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8145 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8146 tmp = tcg_temp_new_i32();
ecc7b3aa 8147 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8148 tcg_temp_free_i64(tmp64);
9ee6e8bb 8149 if ((sh & 2) == 0) {
5e3f878a 8150 tmp2 = load_reg(s, rn);
9ef39277 8151 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8152 tcg_temp_free_i32(tmp2);
9ee6e8bb 8153 }
5e3f878a 8154 store_reg(s, rd, tmp);
9ee6e8bb
PB
8155 } else {
8156 /* 16 * 16 */
5e3f878a
PB
8157 tmp = load_reg(s, rm);
8158 tmp2 = load_reg(s, rs);
8159 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8160 tcg_temp_free_i32(tmp2);
9ee6e8bb 8161 if (op1 == 2) {
a7812ae4
PB
8162 tmp64 = tcg_temp_new_i64();
8163 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8164 tcg_temp_free_i32(tmp);
a7812ae4
PB
8165 gen_addq(s, tmp64, rn, rd);
8166 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8167 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8168 } else {
8169 if (op1 == 0) {
5e3f878a 8170 tmp2 = load_reg(s, rn);
9ef39277 8171 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8172 tcg_temp_free_i32(tmp2);
9ee6e8bb 8173 }
5e3f878a 8174 store_reg(s, rd, tmp);
9ee6e8bb
PB
8175 }
8176 }
8177 break;
8178 default:
8179 goto illegal_op;
8180 }
8181 } else if (((insn & 0x0e000000) == 0 &&
8182 (insn & 0x00000090) != 0x90) ||
8183 ((insn & 0x0e000000) == (1 << 25))) {
8184 int set_cc, logic_cc, shiftop;
8185
8186 op1 = (insn >> 21) & 0xf;
8187 set_cc = (insn >> 20) & 1;
8188 logic_cc = table_logic_cc[op1] & set_cc;
8189
8190 /* data processing instruction */
8191 if (insn & (1 << 25)) {
8192 /* immediate operand */
8193 val = insn & 0xff;
8194 shift = ((insn >> 8) & 0xf) * 2;
dd861b3f 8195 val = ror32(val, shift);
7d1b0095 8196 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8197 tcg_gen_movi_i32(tmp2, val);
8198 if (logic_cc && shift) {
8199 gen_set_CF_bit31(tmp2);
8200 }
9ee6e8bb
PB
8201 } else {
8202 /* register */
8203 rm = (insn) & 0xf;
e9bb4aa9 8204 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8205 shiftop = (insn >> 5) & 3;
8206 if (!(insn & (1 << 4))) {
8207 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8208 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8209 } else {
8210 rs = (insn >> 8) & 0xf;
8984bd2e 8211 tmp = load_reg(s, rs);
e9bb4aa9 8212 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8213 }
8214 }
8215 if (op1 != 0x0f && op1 != 0x0d) {
8216 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8217 tmp = load_reg(s, rn);
8218 } else {
f764718d 8219 tmp = NULL;
9ee6e8bb
PB
8220 }
8221 rd = (insn >> 12) & 0xf;
8222 switch(op1) {
8223 case 0x00:
e9bb4aa9
JR
8224 tcg_gen_and_i32(tmp, tmp, tmp2);
8225 if (logic_cc) {
8226 gen_logic_CC(tmp);
8227 }
7dcc1f89 8228 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8229 break;
8230 case 0x01:
e9bb4aa9
JR
8231 tcg_gen_xor_i32(tmp, tmp, tmp2);
8232 if (logic_cc) {
8233 gen_logic_CC(tmp);
8234 }
7dcc1f89 8235 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8236 break;
8237 case 0x02:
8238 if (set_cc && rd == 15) {
8239 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8240 if (IS_USER(s)) {
9ee6e8bb 8241 goto illegal_op;
e9bb4aa9 8242 }
72485ec4 8243 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8244 gen_exception_return(s, tmp);
9ee6e8bb 8245 } else {
e9bb4aa9 8246 if (set_cc) {
72485ec4 8247 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8248 } else {
8249 tcg_gen_sub_i32(tmp, tmp, tmp2);
8250 }
7dcc1f89 8251 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8252 }
8253 break;
8254 case 0x03:
e9bb4aa9 8255 if (set_cc) {
72485ec4 8256 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8257 } else {
8258 tcg_gen_sub_i32(tmp, tmp2, tmp);
8259 }
7dcc1f89 8260 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8261 break;
8262 case 0x04:
e9bb4aa9 8263 if (set_cc) {
72485ec4 8264 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8265 } else {
8266 tcg_gen_add_i32(tmp, tmp, tmp2);
8267 }
7dcc1f89 8268 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8269 break;
8270 case 0x05:
e9bb4aa9 8271 if (set_cc) {
49b4c31e 8272 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8273 } else {
8274 gen_add_carry(tmp, tmp, tmp2);
8275 }
7dcc1f89 8276 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8277 break;
8278 case 0x06:
e9bb4aa9 8279 if (set_cc) {
2de68a49 8280 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8281 } else {
8282 gen_sub_carry(tmp, tmp, tmp2);
8283 }
7dcc1f89 8284 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8285 break;
8286 case 0x07:
e9bb4aa9 8287 if (set_cc) {
2de68a49 8288 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8289 } else {
8290 gen_sub_carry(tmp, tmp2, tmp);
8291 }
7dcc1f89 8292 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8293 break;
8294 case 0x08:
8295 if (set_cc) {
e9bb4aa9
JR
8296 tcg_gen_and_i32(tmp, tmp, tmp2);
8297 gen_logic_CC(tmp);
9ee6e8bb 8298 }
7d1b0095 8299 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8300 break;
8301 case 0x09:
8302 if (set_cc) {
e9bb4aa9
JR
8303 tcg_gen_xor_i32(tmp, tmp, tmp2);
8304 gen_logic_CC(tmp);
9ee6e8bb 8305 }
7d1b0095 8306 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8307 break;
8308 case 0x0a:
8309 if (set_cc) {
72485ec4 8310 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8311 }
7d1b0095 8312 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8313 break;
8314 case 0x0b:
8315 if (set_cc) {
72485ec4 8316 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8317 }
7d1b0095 8318 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8319 break;
8320 case 0x0c:
e9bb4aa9
JR
8321 tcg_gen_or_i32(tmp, tmp, tmp2);
8322 if (logic_cc) {
8323 gen_logic_CC(tmp);
8324 }
7dcc1f89 8325 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8326 break;
8327 case 0x0d:
8328 if (logic_cc && rd == 15) {
8329 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8330 if (IS_USER(s)) {
9ee6e8bb 8331 goto illegal_op;
e9bb4aa9
JR
8332 }
8333 gen_exception_return(s, tmp2);
9ee6e8bb 8334 } else {
e9bb4aa9
JR
8335 if (logic_cc) {
8336 gen_logic_CC(tmp2);
8337 }
7dcc1f89 8338 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8339 }
8340 break;
8341 case 0x0e:
f669df27 8342 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8343 if (logic_cc) {
8344 gen_logic_CC(tmp);
8345 }
7dcc1f89 8346 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8347 break;
8348 default:
8349 case 0x0f:
e9bb4aa9
JR
8350 tcg_gen_not_i32(tmp2, tmp2);
8351 if (logic_cc) {
8352 gen_logic_CC(tmp2);
8353 }
7dcc1f89 8354 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8355 break;
8356 }
e9bb4aa9 8357 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8358 tcg_temp_free_i32(tmp2);
e9bb4aa9 8359 }
9ee6e8bb
PB
8360 } else {
8361 /* other instructions */
8362 op1 = (insn >> 24) & 0xf;
8363 switch(op1) {
8364 case 0x0:
8365 case 0x1:
8366 /* multiplies, extra load/stores */
8367 sh = (insn >> 5) & 3;
8368 if (sh == 0) {
8369 if (op1 == 0x0) {
8370 rd = (insn >> 16) & 0xf;
8371 rn = (insn >> 12) & 0xf;
8372 rs = (insn >> 8) & 0xf;
8373 rm = (insn) & 0xf;
8374 op1 = (insn >> 20) & 0xf;
8375 switch (op1) {
8376 case 0: case 1: case 2: case 3: case 6:
8377 /* 32 bit mul */
5e3f878a
PB
8378 tmp = load_reg(s, rs);
8379 tmp2 = load_reg(s, rm);
8380 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8381 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8382 if (insn & (1 << 22)) {
8383 /* Subtract (mls) */
8384 ARCH(6T2);
5e3f878a
PB
8385 tmp2 = load_reg(s, rn);
8386 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8387 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8388 } else if (insn & (1 << 21)) {
8389 /* Add */
5e3f878a
PB
8390 tmp2 = load_reg(s, rn);
8391 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8392 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8393 }
8394 if (insn & (1 << 20))
5e3f878a
PB
8395 gen_logic_CC(tmp);
8396 store_reg(s, rd, tmp);
9ee6e8bb 8397 break;
8aac08b1
AJ
8398 case 4:
8399 /* 64 bit mul double accumulate (UMAAL) */
8400 ARCH(6);
8401 tmp = load_reg(s, rs);
8402 tmp2 = load_reg(s, rm);
8403 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8404 gen_addq_lo(s, tmp64, rn);
8405 gen_addq_lo(s, tmp64, rd);
8406 gen_storeq_reg(s, rn, rd, tmp64);
8407 tcg_temp_free_i64(tmp64);
8408 break;
8409 case 8: case 9: case 10: case 11:
8410 case 12: case 13: case 14: case 15:
8411 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8412 tmp = load_reg(s, rs);
8413 tmp2 = load_reg(s, rm);
8aac08b1 8414 if (insn & (1 << 22)) {
c9f10124 8415 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8416 } else {
c9f10124 8417 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8418 }
8419 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8420 TCGv_i32 al = load_reg(s, rn);
8421 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8422 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8423 tcg_temp_free_i32(al);
8424 tcg_temp_free_i32(ah);
9ee6e8bb 8425 }
8aac08b1 8426 if (insn & (1 << 20)) {
c9f10124 8427 gen_logicq_cc(tmp, tmp2);
8aac08b1 8428 }
c9f10124
RH
8429 store_reg(s, rn, tmp);
8430 store_reg(s, rd, tmp2);
9ee6e8bb 8431 break;
8aac08b1
AJ
8432 default:
8433 goto illegal_op;
9ee6e8bb
PB
8434 }
8435 } else {
8436 rn = (insn >> 16) & 0xf;
8437 rd = (insn >> 12) & 0xf;
8438 if (insn & (1 << 23)) {
8439 /* load/store exclusive */
96c55295
PM
8440 bool is_ld = extract32(insn, 20, 1);
8441 bool is_lasr = !extract32(insn, 8, 1);
2359bf80 8442 int op2 = (insn >> 8) & 3;
86753403 8443 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8444
8445 switch (op2) {
8446 case 0: /* lda/stl */
8447 if (op1 == 1) {
8448 goto illegal_op;
8449 }
8450 ARCH(8);
8451 break;
8452 case 1: /* reserved */
8453 goto illegal_op;
8454 case 2: /* ldaex/stlex */
8455 ARCH(8);
8456 break;
8457 case 3: /* ldrex/strex */
8458 if (op1) {
8459 ARCH(6K);
8460 } else {
8461 ARCH(6);
8462 }
8463 break;
8464 }
8465
3174f8e9 8466 addr = tcg_temp_local_new_i32();
98a46317 8467 load_reg_var(s, addr, rn);
2359bf80 8468
96c55295
PM
8469 if (is_lasr && !is_ld) {
8470 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8471 }
8472
2359bf80 8473 if (op2 == 0) {
96c55295 8474 if (is_ld) {
2359bf80
MR
8475 tmp = tcg_temp_new_i32();
8476 switch (op1) {
8477 case 0: /* lda */
9bb6558a
PM
8478 gen_aa32_ld32u_iss(s, tmp, addr,
8479 get_mem_index(s),
8480 rd | ISSIsAcqRel);
2359bf80
MR
8481 break;
8482 case 2: /* ldab */
9bb6558a
PM
8483 gen_aa32_ld8u_iss(s, tmp, addr,
8484 get_mem_index(s),
8485 rd | ISSIsAcqRel);
2359bf80
MR
8486 break;
8487 case 3: /* ldah */
9bb6558a
PM
8488 gen_aa32_ld16u_iss(s, tmp, addr,
8489 get_mem_index(s),
8490 rd | ISSIsAcqRel);
2359bf80
MR
8491 break;
8492 default:
8493 abort();
8494 }
8495 store_reg(s, rd, tmp);
8496 } else {
8497 rm = insn & 0xf;
8498 tmp = load_reg(s, rm);
8499 switch (op1) {
8500 case 0: /* stl */
9bb6558a
PM
8501 gen_aa32_st32_iss(s, tmp, addr,
8502 get_mem_index(s),
8503 rm | ISSIsAcqRel);
2359bf80
MR
8504 break;
8505 case 2: /* stlb */
9bb6558a
PM
8506 gen_aa32_st8_iss(s, tmp, addr,
8507 get_mem_index(s),
8508 rm | ISSIsAcqRel);
2359bf80
MR
8509 break;
8510 case 3: /* stlh */
9bb6558a
PM
8511 gen_aa32_st16_iss(s, tmp, addr,
8512 get_mem_index(s),
8513 rm | ISSIsAcqRel);
2359bf80
MR
8514 break;
8515 default:
8516 abort();
8517 }
8518 tcg_temp_free_i32(tmp);
8519 }
96c55295 8520 } else if (is_ld) {
86753403
PB
8521 switch (op1) {
8522 case 0: /* ldrex */
426f5abc 8523 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8524 break;
8525 case 1: /* ldrexd */
426f5abc 8526 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8527 break;
8528 case 2: /* ldrexb */
426f5abc 8529 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8530 break;
8531 case 3: /* ldrexh */
426f5abc 8532 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8533 break;
8534 default:
8535 abort();
8536 }
9ee6e8bb
PB
8537 } else {
8538 rm = insn & 0xf;
86753403
PB
8539 switch (op1) {
8540 case 0: /* strex */
426f5abc 8541 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8542 break;
8543 case 1: /* strexd */
502e64fe 8544 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8545 break;
8546 case 2: /* strexb */
426f5abc 8547 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8548 break;
8549 case 3: /* strexh */
426f5abc 8550 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8551 break;
8552 default:
8553 abort();
8554 }
9ee6e8bb 8555 }
39d5492a 8556 tcg_temp_free_i32(addr);
96c55295
PM
8557
8558 if (is_lasr && is_ld) {
8559 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
8560 }
c4869ca6
OS
8561 } else if ((insn & 0x00300f00) == 0) {
8562 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
8563 * - SWP, SWPB
8564 */
8565
cf12bce0
EC
8566 TCGv taddr;
8567 TCGMemOp opc = s->be_data;
8568
9ee6e8bb
PB
8569 rm = (insn) & 0xf;
8570
9ee6e8bb 8571 if (insn & (1 << 22)) {
cf12bce0 8572 opc |= MO_UB;
9ee6e8bb 8573 } else {
cf12bce0 8574 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 8575 }
cf12bce0
EC
8576
8577 addr = load_reg(s, rn);
8578 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 8579 tcg_temp_free_i32(addr);
cf12bce0
EC
8580
8581 tmp = load_reg(s, rm);
8582 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8583 get_mem_index(s), opc);
8584 tcg_temp_free(taddr);
8585 store_reg(s, rd, tmp);
c4869ca6
OS
8586 } else {
8587 goto illegal_op;
9ee6e8bb
PB
8588 }
8589 }
8590 } else {
8591 int address_offset;
3960c336 8592 bool load = insn & (1 << 20);
63f26fcf
PM
8593 bool wbit = insn & (1 << 21);
8594 bool pbit = insn & (1 << 24);
3960c336 8595 bool doubleword = false;
9bb6558a
PM
8596 ISSInfo issinfo;
8597
9ee6e8bb
PB
8598 /* Misc load/store */
8599 rn = (insn >> 16) & 0xf;
8600 rd = (insn >> 12) & 0xf;
3960c336 8601
9bb6558a
PM
8602 /* ISS not valid if writeback */
8603 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
8604
3960c336
PM
8605 if (!load && (sh & 2)) {
8606 /* doubleword */
8607 ARCH(5TE);
8608 if (rd & 1) {
8609 /* UNPREDICTABLE; we choose to UNDEF */
8610 goto illegal_op;
8611 }
8612 load = (sh & 1) == 0;
8613 doubleword = true;
8614 }
8615
b0109805 8616 addr = load_reg(s, rn);
63f26fcf 8617 if (pbit) {
b0109805 8618 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 8619 }
9ee6e8bb 8620 address_offset = 0;
3960c336
PM
8621
8622 if (doubleword) {
8623 if (!load) {
9ee6e8bb 8624 /* store */
b0109805 8625 tmp = load_reg(s, rd);
12dcc321 8626 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8627 tcg_temp_free_i32(tmp);
b0109805
PB
8628 tcg_gen_addi_i32(addr, addr, 4);
8629 tmp = load_reg(s, rd + 1);
12dcc321 8630 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8631 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8632 } else {
8633 /* load */
5a839c0d 8634 tmp = tcg_temp_new_i32();
12dcc321 8635 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
8636 store_reg(s, rd, tmp);
8637 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8638 tmp = tcg_temp_new_i32();
12dcc321 8639 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8640 rd++;
9ee6e8bb
PB
8641 }
8642 address_offset = -4;
3960c336
PM
8643 } else if (load) {
8644 /* load */
8645 tmp = tcg_temp_new_i32();
8646 switch (sh) {
8647 case 1:
9bb6558a
PM
8648 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
8649 issinfo);
3960c336
PM
8650 break;
8651 case 2:
9bb6558a
PM
8652 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
8653 issinfo);
3960c336
PM
8654 break;
8655 default:
8656 case 3:
9bb6558a
PM
8657 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
8658 issinfo);
3960c336
PM
8659 break;
8660 }
9ee6e8bb
PB
8661 } else {
8662 /* store */
b0109805 8663 tmp = load_reg(s, rd);
9bb6558a 8664 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 8665 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8666 }
8667 /* Perform base writeback before the loaded value to
8668 ensure correct behavior with overlapping index registers.
b6af0975 8669 ldrd with base writeback is undefined if the
9ee6e8bb 8670 destination and index registers overlap. */
63f26fcf 8671 if (!pbit) {
b0109805
PB
8672 gen_add_datah_offset(s, insn, address_offset, addr);
8673 store_reg(s, rn, addr);
63f26fcf 8674 } else if (wbit) {
9ee6e8bb 8675 if (address_offset)
b0109805
PB
8676 tcg_gen_addi_i32(addr, addr, address_offset);
8677 store_reg(s, rn, addr);
8678 } else {
7d1b0095 8679 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8680 }
8681 if (load) {
8682 /* Complete the load. */
b0109805 8683 store_reg(s, rd, tmp);
9ee6e8bb
PB
8684 }
8685 }
8686 break;
8687 case 0x4:
8688 case 0x5:
8689 goto do_ldst;
8690 case 0x6:
8691 case 0x7:
8692 if (insn & (1 << 4)) {
8693 ARCH(6);
8694 /* Armv6 Media instructions. */
8695 rm = insn & 0xf;
8696 rn = (insn >> 16) & 0xf;
2c0262af 8697 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8698 rs = (insn >> 8) & 0xf;
8699 switch ((insn >> 23) & 3) {
8700 case 0: /* Parallel add/subtract. */
8701 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8702 tmp = load_reg(s, rn);
8703 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8704 sh = (insn >> 5) & 7;
8705 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8706 goto illegal_op;
6ddbc6e4 8707 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8708 tcg_temp_free_i32(tmp2);
6ddbc6e4 8709 store_reg(s, rd, tmp);
9ee6e8bb
PB
8710 break;
8711 case 1:
8712 if ((insn & 0x00700020) == 0) {
6c95676b 8713 /* Halfword pack. */
3670669c
PB
8714 tmp = load_reg(s, rn);
8715 tmp2 = load_reg(s, rm);
9ee6e8bb 8716 shift = (insn >> 7) & 0x1f;
3670669c
PB
8717 if (insn & (1 << 6)) {
8718 /* pkhtb */
d1f8755f 8719 if (shift == 0) {
22478e79 8720 shift = 31;
d1f8755f 8721 }
22478e79 8722 tcg_gen_sari_i32(tmp2, tmp2, shift);
d1f8755f 8723 tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
3670669c
PB
8724 } else {
8725 /* pkhbt */
d1f8755f
RH
8726 tcg_gen_shli_i32(tmp2, tmp2, shift);
8727 tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
3670669c 8728 }
7d1b0095 8729 tcg_temp_free_i32(tmp2);
3670669c 8730 store_reg(s, rd, tmp);
9ee6e8bb
PB
8731 } else if ((insn & 0x00200020) == 0x00200000) {
8732 /* [us]sat */
6ddbc6e4 8733 tmp = load_reg(s, rm);
9ee6e8bb
PB
8734 shift = (insn >> 7) & 0x1f;
8735 if (insn & (1 << 6)) {
8736 if (shift == 0)
8737 shift = 31;
6ddbc6e4 8738 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8739 } else {
6ddbc6e4 8740 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8741 }
8742 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8743 tmp2 = tcg_const_i32(sh);
8744 if (insn & (1 << 22))
9ef39277 8745 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8746 else
9ef39277 8747 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8748 tcg_temp_free_i32(tmp2);
6ddbc6e4 8749 store_reg(s, rd, tmp);
9ee6e8bb
PB
8750 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8751 /* [us]sat16 */
6ddbc6e4 8752 tmp = load_reg(s, rm);
9ee6e8bb 8753 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8754 tmp2 = tcg_const_i32(sh);
8755 if (insn & (1 << 22))
9ef39277 8756 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8757 else
9ef39277 8758 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8759 tcg_temp_free_i32(tmp2);
6ddbc6e4 8760 store_reg(s, rd, tmp);
9ee6e8bb
PB
8761 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8762 /* Select bytes. */
6ddbc6e4
PB
8763 tmp = load_reg(s, rn);
8764 tmp2 = load_reg(s, rm);
7d1b0095 8765 tmp3 = tcg_temp_new_i32();
0ecb72a5 8766 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8767 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8768 tcg_temp_free_i32(tmp3);
8769 tcg_temp_free_i32(tmp2);
6ddbc6e4 8770 store_reg(s, rd, tmp);
9ee6e8bb 8771 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8772 tmp = load_reg(s, rm);
9ee6e8bb 8773 shift = (insn >> 10) & 3;
1301f322 8774 /* ??? In many cases it's not necessary to do a
9ee6e8bb 8775 rotate, a shift is sufficient. */
464eaa95 8776 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8777 op1 = (insn >> 20) & 7;
8778 switch (op1) {
5e3f878a
PB
8779 case 0: gen_sxtb16(tmp); break;
8780 case 2: gen_sxtb(tmp); break;
8781 case 3: gen_sxth(tmp); break;
8782 case 4: gen_uxtb16(tmp); break;
8783 case 6: gen_uxtb(tmp); break;
8784 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
8785 default: goto illegal_op;
8786 }
8787 if (rn != 15) {
5e3f878a 8788 tmp2 = load_reg(s, rn);
9ee6e8bb 8789 if ((op1 & 3) == 0) {
5e3f878a 8790 gen_add16(tmp, tmp2);
9ee6e8bb 8791 } else {
5e3f878a 8792 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8793 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8794 }
8795 }
6c95676b 8796 store_reg(s, rd, tmp);
9ee6e8bb
PB
8797 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8798 /* rev */
b0109805 8799 tmp = load_reg(s, rm);
9ee6e8bb
PB
8800 if (insn & (1 << 22)) {
8801 if (insn & (1 << 7)) {
b0109805 8802 gen_revsh(tmp);
9ee6e8bb
PB
8803 } else {
8804 ARCH(6T2);
b0109805 8805 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8806 }
8807 } else {
8808 if (insn & (1 << 7))
b0109805 8809 gen_rev16(tmp);
9ee6e8bb 8810 else
66896cb8 8811 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8812 }
b0109805 8813 store_reg(s, rd, tmp);
9ee6e8bb
PB
8814 } else {
8815 goto illegal_op;
8816 }
8817 break;
8818 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8819 switch ((insn >> 20) & 0x7) {
8820 case 5:
8821 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8822 /* op2 not 00x or 11x : UNDEF */
8823 goto illegal_op;
8824 }
838fa72d
AJ
8825 /* Signed multiply most significant [accumulate].
8826 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8827 tmp = load_reg(s, rm);
8828 tmp2 = load_reg(s, rs);
5f8cd06e 8829 tcg_gen_muls2_i32(tmp2, tmp, tmp, tmp2);
838fa72d 8830
955a7dd5 8831 if (rd != 15) {
5f8cd06e 8832 tmp3 = load_reg(s, rd);
9ee6e8bb 8833 if (insn & (1 << 6)) {
5f8cd06e 8834 tcg_gen_sub_i32(tmp, tmp, tmp3);
9ee6e8bb 8835 } else {
5f8cd06e 8836 tcg_gen_add_i32(tmp, tmp, tmp3);
9ee6e8bb 8837 }
5f8cd06e 8838 tcg_temp_free_i32(tmp3);
9ee6e8bb 8839 }
838fa72d 8840 if (insn & (1 << 5)) {
5f8cd06e
RH
8841 /*
8842 * Adding 0x80000000 to the 64-bit quantity
8843 * means that we have carry in to the high
8844 * word when the low word has the high bit set.
8845 */
8846 tcg_gen_shri_i32(tmp2, tmp2, 31);
8847 tcg_gen_add_i32(tmp, tmp, tmp2);
838fa72d 8848 }
5f8cd06e 8849 tcg_temp_free_i32(tmp2);
955a7dd5 8850 store_reg(s, rn, tmp);
41e9564d
PM
8851 break;
8852 case 0:
8853 case 4:
8854 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8855 if (insn & (1 << 7)) {
8856 goto illegal_op;
8857 }
8858 tmp = load_reg(s, rm);
8859 tmp2 = load_reg(s, rs);
9ee6e8bb 8860 if (insn & (1 << 5))
5e3f878a
PB
8861 gen_swap_half(tmp2);
8862 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8863 if (insn & (1 << 22)) {
5e3f878a 8864 /* smlald, smlsld */
33bbd75a
PC
8865 TCGv_i64 tmp64_2;
8866
a7812ae4 8867 tmp64 = tcg_temp_new_i64();
33bbd75a 8868 tmp64_2 = tcg_temp_new_i64();
a7812ae4 8869 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 8870 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 8871 tcg_temp_free_i32(tmp);
33bbd75a
PC
8872 tcg_temp_free_i32(tmp2);
8873 if (insn & (1 << 6)) {
8874 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8875 } else {
8876 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8877 }
8878 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
8879 gen_addq(s, tmp64, rd, rn);
8880 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8881 tcg_temp_free_i64(tmp64);
9ee6e8bb 8882 } else {
5e3f878a 8883 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
8884 if (insn & (1 << 6)) {
8885 /* This subtraction cannot overflow. */
8886 tcg_gen_sub_i32(tmp, tmp, tmp2);
8887 } else {
8888 /* This addition cannot overflow 32 bits;
8889 * however it may overflow considered as a
8890 * signed operation, in which case we must set
8891 * the Q flag.
8892 */
8893 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8894 }
8895 tcg_temp_free_i32(tmp2);
22478e79 8896 if (rd != 15)
9ee6e8bb 8897 {
22478e79 8898 tmp2 = load_reg(s, rd);
9ef39277 8899 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8900 tcg_temp_free_i32(tmp2);
9ee6e8bb 8901 }
22478e79 8902 store_reg(s, rn, tmp);
9ee6e8bb 8903 }
41e9564d 8904 break;
b8b8ea05
PM
8905 case 1:
8906 case 3:
8907 /* SDIV, UDIV */
7e0cf8b4 8908 if (!dc_isar_feature(arm_div, s)) {
b8b8ea05
PM
8909 goto illegal_op;
8910 }
8911 if (((insn >> 5) & 7) || (rd != 15)) {
8912 goto illegal_op;
8913 }
8914 tmp = load_reg(s, rm);
8915 tmp2 = load_reg(s, rs);
8916 if (insn & (1 << 21)) {
8917 gen_helper_udiv(tmp, tmp, tmp2);
8918 } else {
8919 gen_helper_sdiv(tmp, tmp, tmp2);
8920 }
8921 tcg_temp_free_i32(tmp2);
8922 store_reg(s, rn, tmp);
8923 break;
41e9564d
PM
8924 default:
8925 goto illegal_op;
9ee6e8bb
PB
8926 }
8927 break;
8928 case 3:
8929 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8930 switch (op1) {
8931 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8932 ARCH(6);
8933 tmp = load_reg(s, rm);
8934 tmp2 = load_reg(s, rs);
8935 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8936 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8937 if (rd != 15) {
8938 tmp2 = load_reg(s, rd);
6ddbc6e4 8939 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8940 tcg_temp_free_i32(tmp2);
9ee6e8bb 8941 }
ded9d295 8942 store_reg(s, rn, tmp);
9ee6e8bb
PB
8943 break;
8944 case 0x20: case 0x24: case 0x28: case 0x2c:
8945 /* Bitfield insert/clear. */
8946 ARCH(6T2);
8947 shift = (insn >> 7) & 0x1f;
8948 i = (insn >> 16) & 0x1f;
45140a57
KB
8949 if (i < shift) {
8950 /* UNPREDICTABLE; we choose to UNDEF */
8951 goto illegal_op;
8952 }
9ee6e8bb
PB
8953 i = i + 1 - shift;
8954 if (rm == 15) {
7d1b0095 8955 tmp = tcg_temp_new_i32();
5e3f878a 8956 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8957 } else {
5e3f878a 8958 tmp = load_reg(s, rm);
9ee6e8bb
PB
8959 }
8960 if (i != 32) {
5e3f878a 8961 tmp2 = load_reg(s, rd);
d593c48e 8962 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 8963 tcg_temp_free_i32(tmp2);
9ee6e8bb 8964 }
5e3f878a 8965 store_reg(s, rd, tmp);
9ee6e8bb
PB
8966 break;
8967 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8968 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 8969 ARCH(6T2);
5e3f878a 8970 tmp = load_reg(s, rm);
9ee6e8bb
PB
8971 shift = (insn >> 7) & 0x1f;
8972 i = ((insn >> 16) & 0x1f) + 1;
8973 if (shift + i > 32)
8974 goto illegal_op;
8975 if (i < 32) {
8976 if (op1 & 0x20) {
59a71b4c 8977 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 8978 } else {
59a71b4c 8979 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
8980 }
8981 }
5e3f878a 8982 store_reg(s, rd, tmp);
9ee6e8bb
PB
8983 break;
8984 default:
8985 goto illegal_op;
8986 }
8987 break;
8988 }
8989 break;
8990 }
8991 do_ldst:
8992 /* Check for undefined extension instructions
8993 * per the ARM Bible IE:
8994 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8995 */
8996 sh = (0xf << 20) | (0xf << 4);
8997 if (op1 == 0x7 && ((insn & sh) == sh))
8998 {
8999 goto illegal_op;
9000 }
9001 /* load/store byte/word */
9002 rn = (insn >> 16) & 0xf;
9003 rd = (insn >> 12) & 0xf;
b0109805 9004 tmp2 = load_reg(s, rn);
a99caa48
PM
9005 if ((insn & 0x01200000) == 0x00200000) {
9006 /* ldrt/strt */
579d21cc 9007 i = get_a32_user_mem_index(s);
a99caa48
PM
9008 } else {
9009 i = get_mem_index(s);
9010 }
9ee6e8bb 9011 if (insn & (1 << 24))
b0109805 9012 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9013 if (insn & (1 << 20)) {
9014 /* load */
5a839c0d 9015 tmp = tcg_temp_new_i32();
9ee6e8bb 9016 if (insn & (1 << 22)) {
9bb6558a 9017 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9018 } else {
9bb6558a 9019 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9020 }
9ee6e8bb
PB
9021 } else {
9022 /* store */
b0109805 9023 tmp = load_reg(s, rd);
5a839c0d 9024 if (insn & (1 << 22)) {
9bb6558a 9025 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9026 } else {
9bb6558a 9027 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9028 }
9029 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9030 }
9031 if (!(insn & (1 << 24))) {
b0109805
PB
9032 gen_add_data_offset(s, insn, tmp2);
9033 store_reg(s, rn, tmp2);
9034 } else if (insn & (1 << 21)) {
9035 store_reg(s, rn, tmp2);
9036 } else {
7d1b0095 9037 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9038 }
9039 if (insn & (1 << 20)) {
9040 /* Complete the load. */
7dcc1f89 9041 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9042 }
9043 break;
9044 case 0x08:
9045 case 0x09:
9046 {
da3e53dd
PM
9047 int j, n, loaded_base;
9048 bool exc_return = false;
9049 bool is_load = extract32(insn, 20, 1);
9050 bool user = false;
39d5492a 9051 TCGv_i32 loaded_var;
9ee6e8bb
PB
9052 /* load/store multiple words */
9053 /* XXX: store correct base if write back */
9ee6e8bb 9054 if (insn & (1 << 22)) {
da3e53dd 9055 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9056 if (IS_USER(s))
9057 goto illegal_op; /* only usable in supervisor mode */
9058
da3e53dd
PM
9059 if (is_load && extract32(insn, 15, 1)) {
9060 exc_return = true;
9061 } else {
9062 user = true;
9063 }
9ee6e8bb
PB
9064 }
9065 rn = (insn >> 16) & 0xf;
b0109805 9066 addr = load_reg(s, rn);
9ee6e8bb
PB
9067
9068 /* compute total size */
9069 loaded_base = 0;
f764718d 9070 loaded_var = NULL;
9ee6e8bb 9071 n = 0;
9798ac71 9072 for (i = 0; i < 16; i++) {
9ee6e8bb
PB
9073 if (insn & (1 << i))
9074 n++;
9075 }
9076 /* XXX: test invalid n == 0 case ? */
9077 if (insn & (1 << 23)) {
9078 if (insn & (1 << 24)) {
9079 /* pre increment */
b0109805 9080 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9081 } else {
9082 /* post increment */
9083 }
9084 } else {
9085 if (insn & (1 << 24)) {
9086 /* pre decrement */
b0109805 9087 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9088 } else {
9089 /* post decrement */
9090 if (n != 1)
b0109805 9091 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9092 }
9093 }
9094 j = 0;
9798ac71 9095 for (i = 0; i < 16; i++) {
9ee6e8bb 9096 if (insn & (1 << i)) {
da3e53dd 9097 if (is_load) {
9ee6e8bb 9098 /* load */
5a839c0d 9099 tmp = tcg_temp_new_i32();
12dcc321 9100 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9101 if (user) {
b75263d6 9102 tmp2 = tcg_const_i32(i);
1ce94f81 9103 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9104 tcg_temp_free_i32(tmp2);
7d1b0095 9105 tcg_temp_free_i32(tmp);
9ee6e8bb 9106 } else if (i == rn) {
b0109805 9107 loaded_var = tmp;
9ee6e8bb 9108 loaded_base = 1;
9d090d17 9109 } else if (i == 15 && exc_return) {
fb0e8e79 9110 store_pc_exc_ret(s, tmp);
9ee6e8bb 9111 } else {
7dcc1f89 9112 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9113 }
9114 } else {
9115 /* store */
9116 if (i == 15) {
7d1b0095 9117 tmp = tcg_temp_new_i32();
fdbcf632 9118 tcg_gen_movi_i32(tmp, read_pc(s));
9ee6e8bb 9119 } else if (user) {
7d1b0095 9120 tmp = tcg_temp_new_i32();
b75263d6 9121 tmp2 = tcg_const_i32(i);
9ef39277 9122 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9123 tcg_temp_free_i32(tmp2);
9ee6e8bb 9124 } else {
b0109805 9125 tmp = load_reg(s, i);
9ee6e8bb 9126 }
12dcc321 9127 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9128 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9129 }
9130 j++;
9131 /* no need to add after the last transfer */
9132 if (j != n)
b0109805 9133 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9134 }
9135 }
9136 if (insn & (1 << 21)) {
9137 /* write back */
9138 if (insn & (1 << 23)) {
9139 if (insn & (1 << 24)) {
9140 /* pre increment */
9141 } else {
9142 /* post increment */
b0109805 9143 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9144 }
9145 } else {
9146 if (insn & (1 << 24)) {
9147 /* pre decrement */
9148 if (n != 1)
b0109805 9149 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9150 } else {
9151 /* post decrement */
b0109805 9152 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9153 }
9154 }
b0109805
PB
9155 store_reg(s, rn, addr);
9156 } else {
7d1b0095 9157 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9158 }
9159 if (loaded_base) {
b0109805 9160 store_reg(s, rn, loaded_var);
9ee6e8bb 9161 }
da3e53dd 9162 if (exc_return) {
9ee6e8bb 9163 /* Restore CPSR from SPSR. */
d9ba4830 9164 tmp = load_cpu_field(spsr);
e69ad9df
AL
9165 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9166 gen_io_start();
9167 }
235ea1f5 9168 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9169 tcg_temp_free_i32(tmp);
b29fd33d 9170 /* Must exit loop to check un-masked IRQs */
dcba3a8d 9171 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
9172 }
9173 }
9174 break;
9175 case 0xa:
9176 case 0xb:
9177 {
9178 int32_t offset;
9179
9180 /* branch (and link) */
9ee6e8bb 9181 if (insn & (1 << 24)) {
7d1b0095 9182 tmp = tcg_temp_new_i32();
a0415916 9183 tcg_gen_movi_i32(tmp, s->base.pc_next);
5e3f878a 9184 store_reg(s, 14, tmp);
9ee6e8bb 9185 }
534df156 9186 offset = sextract32(insn << 2, 0, 26);
fdbcf632 9187 gen_jmp(s, read_pc(s) + offset);
9ee6e8bb
PB
9188 }
9189 break;
9190 case 0xc:
9191 case 0xd:
9192 case 0xe:
6a57f3eb
WN
9193 if (((insn >> 8) & 0xe) == 10) {
9194 /* VFP. */
7dcc1f89 9195 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9196 goto illegal_op;
9197 }
7dcc1f89 9198 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9199 /* Coprocessor. */
9ee6e8bb 9200 goto illegal_op;
6a57f3eb 9201 }
9ee6e8bb
PB
9202 break;
9203 case 0xf:
9204 /* swi */
a0415916 9205 gen_set_pc_im(s, s->base.pc_next);
d4a2dc67 9206 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 9207 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
9208 break;
9209 default:
9210 illegal_op:
1ce21ba1 9211 unallocated_encoding(s);
9ee6e8bb
PB
9212 break;
9213 }
9214 }
9215}
9216
331b1ca6 9217static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
296e5a0a 9218{
331b1ca6
RH
9219 /*
9220 * Return true if this is a 16 bit instruction. We must be precise
9221 * about this (matching the decode).
296e5a0a
PM
9222 */
9223 if ((insn >> 11) < 0x1d) {
9224 /* Definitely a 16-bit instruction */
9225 return true;
9226 }
9227
9228 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9229 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9230 * end up actually treating this as two 16-bit insns, though,
9231 * if it's half of a bl/blx pair that might span a page boundary.
9232 */
14120108
JS
9233 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
9234 arm_dc_feature(s, ARM_FEATURE_M)) {
296e5a0a
PM
9235 /* Thumb2 cores (including all M profile ones) always treat
9236 * 32-bit insns as 32-bit.
9237 */
9238 return false;
9239 }
9240
331b1ca6 9241 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
296e5a0a
PM
9242 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9243 * is not on the next page; we merge this into a 32-bit
9244 * insn.
9245 */
9246 return false;
9247 }
9248 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9249 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9250 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
9251 * -- handle as single 16 bit insn
9252 */
9253 return true;
9254}
9255
/* Return true if this is a Thumb-2 logical op. */
static int
thumb2_logic_op(int op)
{
    /* Data-processing opcodes 0..7 are the logical operations. */
    return op < 8 ? 1 : 0;
}
9262
9263/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9264 then set condition code flags based on the result of the operation.
9265 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9266 to the high bit of T1.
9267 Returns zero if the opcode is valid. */
9268
9269static int
39d5492a
PM
9270gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9271 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9272{
9273 int logic_cc;
9274
9275 logic_cc = 0;
9276 switch (op) {
9277 case 0: /* and */
396e467c 9278 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9279 logic_cc = conds;
9280 break;
9281 case 1: /* bic */
f669df27 9282 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9283 logic_cc = conds;
9284 break;
9285 case 2: /* orr */
396e467c 9286 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9287 logic_cc = conds;
9288 break;
9289 case 3: /* orn */
29501f1b 9290 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9291 logic_cc = conds;
9292 break;
9293 case 4: /* eor */
396e467c 9294 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9295 logic_cc = conds;
9296 break;
9297 case 8: /* add */
9298 if (conds)
72485ec4 9299 gen_add_CC(t0, t0, t1);
9ee6e8bb 9300 else
396e467c 9301 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9302 break;
9303 case 10: /* adc */
9304 if (conds)
49b4c31e 9305 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9306 else
396e467c 9307 gen_adc(t0, t1);
9ee6e8bb
PB
9308 break;
9309 case 11: /* sbc */
2de68a49
RH
9310 if (conds) {
9311 gen_sbc_CC(t0, t0, t1);
9312 } else {
396e467c 9313 gen_sub_carry(t0, t0, t1);
2de68a49 9314 }
9ee6e8bb
PB
9315 break;
9316 case 13: /* sub */
9317 if (conds)
72485ec4 9318 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9319 else
396e467c 9320 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9321 break;
9322 case 14: /* rsb */
9323 if (conds)
72485ec4 9324 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9325 else
396e467c 9326 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9327 break;
9328 default: /* 5, 6, 7, 9, 12, 15. */
9329 return 1;
9330 }
9331 if (logic_cc) {
396e467c 9332 gen_logic_CC(t0);
9ee6e8bb 9333 if (shifter_out)
396e467c 9334 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9335 }
9336 return 0;
9337}
9338
2eea841c
PM
9339/* Translate a 32-bit thumb instruction. */
9340static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 9341{
296e5a0a 9342 uint32_t imm, shift, offset;
9ee6e8bb 9343 uint32_t rd, rn, rm, rs;
39d5492a
PM
9344 TCGv_i32 tmp;
9345 TCGv_i32 tmp2;
9346 TCGv_i32 tmp3;
9347 TCGv_i32 addr;
a7812ae4 9348 TCGv_i64 tmp64;
9ee6e8bb
PB
9349 int op;
9350 int shiftop;
9351 int conds;
9352 int logic_cc;
9353
14120108
JS
9354 /*
9355 * ARMv6-M supports a limited subset of Thumb2 instructions.
9356 * Other Thumb1 architectures allow only 32-bit
9357 * combined BL/BLX prefix and suffix.
296e5a0a 9358 */
14120108
JS
9359 if (arm_dc_feature(s, ARM_FEATURE_M) &&
9360 !arm_dc_feature(s, ARM_FEATURE_V7)) {
9361 int i;
9362 bool found = false;
8297cb13
JS
9363 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
9364 0xf3b08040 /* dsb */,
9365 0xf3b08050 /* dmb */,
9366 0xf3b08060 /* isb */,
9367 0xf3e08000 /* mrs */,
9368 0xf000d000 /* bl */};
9369 static const uint32_t armv6m_mask[] = {0xffe0d000,
9370 0xfff0d0f0,
9371 0xfff0d0f0,
9372 0xfff0d0f0,
9373 0xffe0d000,
9374 0xf800d000};
14120108
JS
9375
9376 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
9377 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
9378 found = true;
9379 break;
9380 }
9381 }
9382 if (!found) {
9383 goto illegal_op;
9384 }
9385 } else if ((insn & 0xf800e800) != 0xf000e800) {
9ee6e8bb
PB
9386 ARCH(6T2);
9387 }
9388
9389 rn = (insn >> 16) & 0xf;
9390 rs = (insn >> 12) & 0xf;
9391 rd = (insn >> 8) & 0xf;
9392 rm = insn & 0xf;
9393 switch ((insn >> 25) & 0xf) {
9394 case 0: case 1: case 2: case 3:
9395 /* 16-bit instructions. Should never happen. */
9396 abort();
9397 case 4:
9398 if (insn & (1 << 22)) {
ebfe27c5
PM
9399 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9400 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 9401 * table branch, TT.
ebfe27c5 9402 */
76eff04d
PM
9403 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
9404 arm_dc_feature(s, ARM_FEATURE_V8)) {
9405 /* 0b1110_1001_0111_1111_1110_1001_0111_111
9406 * - SG (v8M only)
9407 * The bulk of the behaviour for this instruction is implemented
9408 * in v7m_handle_execute_nsc(), which deals with the insn when
9409 * it is executed by a CPU in non-secure state from memory
9410 * which is Secure & NonSecure-Callable.
9411 * Here we only need to handle the remaining cases:
9412 * * in NS memory (including the "security extension not
9413 * implemented" case) : NOP
9414 * * in S memory but CPU already secure (clear IT bits)
9415 * We know that the attribute for the memory this insn is
9416 * in must match the current CPU state, because otherwise
9417 * get_phys_addr_pmsav8 would have generated an exception.
9418 */
9419 if (s->v8m_secure) {
9420 /* Like the IT insn, we don't need to generate any code */
9421 s->condexec_cond = 0;
9422 s->condexec_mask = 0;
9423 }
9424 } else if (insn & 0x01200000) {
ebfe27c5
PM
9425 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9426 * - load/store dual (post-indexed)
9427 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9428 * - load/store dual (literal and immediate)
9429 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9430 * - load/store dual (pre-indexed)
9431 */
910d7692
PM
9432 bool wback = extract32(insn, 21, 1);
9433
16e0d823
RH
9434 if (rn == 15 && (insn & (1 << 21))) {
9435 /* UNPREDICTABLE */
9436 goto illegal_op;
9ee6e8bb 9437 }
16e0d823
RH
9438
9439 addr = add_reg_for_lit(s, rn, 0);
9ee6e8bb 9440 offset = (insn & 0xff) * 4;
910d7692 9441 if ((insn & (1 << 23)) == 0) {
9ee6e8bb 9442 offset = -offset;
910d7692
PM
9443 }
9444
9445 if (s->v8m_stackcheck && rn == 13 && wback) {
9446 /*
9447 * Here 'addr' is the current SP; if offset is +ve we're
9448 * moving SP up, else down. It is UNKNOWN whether the limit
9449 * check triggers when SP starts below the limit and ends
9450 * up above it; check whichever of the current and final
9451 * SP is lower, so QEMU will trigger in that situation.
9452 */
9453 if ((int32_t)offset < 0) {
9454 TCGv_i32 newsp = tcg_temp_new_i32();
9455
9456 tcg_gen_addi_i32(newsp, addr, offset);
9457 gen_helper_v8m_stackcheck(cpu_env, newsp);
9458 tcg_temp_free_i32(newsp);
9459 } else {
9460 gen_helper_v8m_stackcheck(cpu_env, addr);
9461 }
9462 }
9463
9ee6e8bb 9464 if (insn & (1 << 24)) {
b0109805 9465 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9466 offset = 0;
9467 }
9468 if (insn & (1 << 20)) {
9469 /* ldrd */
e2592fad 9470 tmp = tcg_temp_new_i32();
12dcc321 9471 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9472 store_reg(s, rs, tmp);
9473 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9474 tmp = tcg_temp_new_i32();
12dcc321 9475 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9476 store_reg(s, rd, tmp);
9ee6e8bb
PB
9477 } else {
9478 /* strd */
b0109805 9479 tmp = load_reg(s, rs);
12dcc321 9480 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9481 tcg_temp_free_i32(tmp);
b0109805
PB
9482 tcg_gen_addi_i32(addr, addr, 4);
9483 tmp = load_reg(s, rd);
12dcc321 9484 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9485 tcg_temp_free_i32(tmp);
9ee6e8bb 9486 }
910d7692 9487 if (wback) {
9ee6e8bb 9488 /* Base writeback. */
b0109805
PB
9489 tcg_gen_addi_i32(addr, addr, offset - 4);
9490 store_reg(s, rn, addr);
9491 } else {
7d1b0095 9492 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9493 }
9494 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
9495 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9496 * - load/store exclusive word
5158de24 9497 * - TT (v8M only)
ebfe27c5
PM
9498 */
9499 if (rs == 15) {
5158de24
PM
9500 if (!(insn & (1 << 20)) &&
9501 arm_dc_feature(s, ARM_FEATURE_M) &&
9502 arm_dc_feature(s, ARM_FEATURE_V8)) {
9503 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
9504 * - TT (v8M only)
9505 */
9506 bool alt = insn & (1 << 7);
9507 TCGv_i32 addr, op, ttresp;
9508
9509 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
9510 /* we UNDEF for these UNPREDICTABLE cases */
9511 goto illegal_op;
9512 }
9513
9514 if (alt && !s->v8m_secure) {
9515 goto illegal_op;
9516 }
9517
9518 addr = load_reg(s, rn);
9519 op = tcg_const_i32(extract32(insn, 6, 2));
9520 ttresp = tcg_temp_new_i32();
9521 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
9522 tcg_temp_free_i32(addr);
9523 tcg_temp_free_i32(op);
9524 store_reg(s, rd, ttresp);
384c6c03 9525 break;
5158de24 9526 }
ebfe27c5
PM
9527 goto illegal_op;
9528 }
39d5492a 9529 addr = tcg_temp_local_new_i32();
98a46317 9530 load_reg_var(s, addr, rn);
426f5abc 9531 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9532 if (insn & (1 << 20)) {
426f5abc 9533 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9534 } else {
426f5abc 9535 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9536 }
39d5492a 9537 tcg_temp_free_i32(addr);
2359bf80 9538 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb 9539 /* Table Branch. */
fdbcf632 9540 addr = load_reg(s, rn);
b26eefb6 9541 tmp = load_reg(s, rm);
b0109805 9542 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9543 if (insn & (1 << 4)) {
9544 /* tbh */
b0109805 9545 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9546 tcg_temp_free_i32(tmp);
e2592fad 9547 tmp = tcg_temp_new_i32();
12dcc321 9548 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9549 } else { /* tbb */
7d1b0095 9550 tcg_temp_free_i32(tmp);
e2592fad 9551 tmp = tcg_temp_new_i32();
12dcc321 9552 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9553 }
7d1b0095 9554 tcg_temp_free_i32(addr);
b0109805 9555 tcg_gen_shli_i32(tmp, tmp, 1);
fdbcf632 9556 tcg_gen_addi_i32(tmp, tmp, read_pc(s));
b0109805 9557 store_reg(s, 15, tmp);
9ee6e8bb 9558 } else {
96c55295
PM
9559 bool is_lasr = false;
9560 bool is_ld = extract32(insn, 20, 1);
2359bf80 9561 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9562 op = (insn >> 4) & 0x3;
2359bf80
MR
9563 switch (op2) {
9564 case 0:
426f5abc 9565 goto illegal_op;
2359bf80
MR
9566 case 1:
9567 /* Load/store exclusive byte/halfword/doubleword */
9568 if (op == 2) {
9569 goto illegal_op;
9570 }
9571 ARCH(7);
9572 break;
9573 case 2:
9574 /* Load-acquire/store-release */
9575 if (op == 3) {
9576 goto illegal_op;
9577 }
9578 /* Fall through */
9579 case 3:
9580 /* Load-acquire/store-release exclusive */
9581 ARCH(8);
96c55295 9582 is_lasr = true;
2359bf80 9583 break;
426f5abc 9584 }
96c55295
PM
9585
9586 if (is_lasr && !is_ld) {
9587 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9588 }
9589
39d5492a 9590 addr = tcg_temp_local_new_i32();
98a46317 9591 load_reg_var(s, addr, rn);
2359bf80 9592 if (!(op2 & 1)) {
96c55295 9593 if (is_ld) {
2359bf80
MR
9594 tmp = tcg_temp_new_i32();
9595 switch (op) {
9596 case 0: /* ldab */
9bb6558a
PM
9597 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9598 rs | ISSIsAcqRel);
2359bf80
MR
9599 break;
9600 case 1: /* ldah */
9bb6558a
PM
9601 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9602 rs | ISSIsAcqRel);
2359bf80
MR
9603 break;
9604 case 2: /* lda */
9bb6558a
PM
9605 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9606 rs | ISSIsAcqRel);
2359bf80
MR
9607 break;
9608 default:
9609 abort();
9610 }
9611 store_reg(s, rs, tmp);
9612 } else {
9613 tmp = load_reg(s, rs);
9614 switch (op) {
9615 case 0: /* stlb */
9bb6558a
PM
9616 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9617 rs | ISSIsAcqRel);
2359bf80
MR
9618 break;
9619 case 1: /* stlh */
9bb6558a
PM
9620 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9621 rs | ISSIsAcqRel);
2359bf80
MR
9622 break;
9623 case 2: /* stl */
9bb6558a
PM
9624 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9625 rs | ISSIsAcqRel);
2359bf80
MR
9626 break;
9627 default:
9628 abort();
9629 }
9630 tcg_temp_free_i32(tmp);
9631 }
96c55295 9632 } else if (is_ld) {
426f5abc 9633 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9634 } else {
426f5abc 9635 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9636 }
39d5492a 9637 tcg_temp_free_i32(addr);
96c55295
PM
9638
9639 if (is_lasr && is_ld) {
9640 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
9641 }
9ee6e8bb
PB
9642 }
9643 } else {
9644 /* Load/store multiple, RFE, SRS. */
9645 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9646 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9647 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9648 goto illegal_op;
00115976 9649 }
9ee6e8bb
PB
9650 if (insn & (1 << 20)) {
9651 /* rfe */
b0109805
PB
9652 addr = load_reg(s, rn);
9653 if ((insn & (1 << 24)) == 0)
9654 tcg_gen_addi_i32(addr, addr, -8);
9655 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9656 tmp = tcg_temp_new_i32();
12dcc321 9657 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9658 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9659 tmp2 = tcg_temp_new_i32();
12dcc321 9660 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9661 if (insn & (1 << 21)) {
9662 /* Base writeback. */
b0109805
PB
9663 if (insn & (1 << 24)) {
9664 tcg_gen_addi_i32(addr, addr, 4);
9665 } else {
9666 tcg_gen_addi_i32(addr, addr, -4);
9667 }
9668 store_reg(s, rn, addr);
9669 } else {
7d1b0095 9670 tcg_temp_free_i32(addr);
9ee6e8bb 9671 }
b0109805 9672 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9673 } else {
9674 /* srs */
81465888
PM
9675 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9676 insn & (1 << 21));
9ee6e8bb
PB
9677 }
9678 } else {
5856d44e 9679 int i, loaded_base = 0;
39d5492a 9680 TCGv_i32 loaded_var;
7c0ed88e 9681 bool wback = extract32(insn, 21, 1);
9ee6e8bb 9682 /* Load/store multiple. */
b0109805 9683 addr = load_reg(s, rn);
9ee6e8bb
PB
9684 offset = 0;
9685 for (i = 0; i < 16; i++) {
9686 if (insn & (1 << i))
9687 offset += 4;
9688 }
7c0ed88e 9689
9ee6e8bb 9690 if (insn & (1 << 24)) {
b0109805 9691 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9692 }
9693
7c0ed88e
PM
9694 if (s->v8m_stackcheck && rn == 13 && wback) {
9695 /*
9696 * If the writeback is incrementing SP rather than
9697 * decrementing it, and the initial SP is below the
9698 * stack limit but the final written-back SP would
9699 * be above, then then we must not perform any memory
9700 * accesses, but it is IMPDEF whether we generate
9701 * an exception. We choose to do so in this case.
9702 * At this point 'addr' is the lowest address, so
9703 * either the original SP (if incrementing) or our
9704 * final SP (if decrementing), so that's what we check.
9705 */
9706 gen_helper_v8m_stackcheck(cpu_env, addr);
9707 }
9708
f764718d 9709 loaded_var = NULL;
9ee6e8bb
PB
9710 for (i = 0; i < 16; i++) {
9711 if ((insn & (1 << i)) == 0)
9712 continue;
9713 if (insn & (1 << 20)) {
9714 /* Load. */
e2592fad 9715 tmp = tcg_temp_new_i32();
12dcc321 9716 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9717 if (i == 15) {
3bb8a96f 9718 gen_bx_excret(s, tmp);
5856d44e
YO
9719 } else if (i == rn) {
9720 loaded_var = tmp;
9721 loaded_base = 1;
9ee6e8bb 9722 } else {
b0109805 9723 store_reg(s, i, tmp);
9ee6e8bb
PB
9724 }
9725 } else {
9726 /* Store. */
b0109805 9727 tmp = load_reg(s, i);
12dcc321 9728 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9729 tcg_temp_free_i32(tmp);
9ee6e8bb 9730 }
b0109805 9731 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9732 }
5856d44e
YO
9733 if (loaded_base) {
9734 store_reg(s, rn, loaded_var);
9735 }
7c0ed88e 9736 if (wback) {
9ee6e8bb
PB
9737 /* Base register writeback. */
9738 if (insn & (1 << 24)) {
b0109805 9739 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9740 }
9741 /* Fault if writeback register is in register list. */
9742 if (insn & (1 << rn))
9743 goto illegal_op;
b0109805
PB
9744 store_reg(s, rn, addr);
9745 } else {
7d1b0095 9746 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9747 }
9748 }
9749 }
9750 break;
2af9ab77
JB
9751 case 5:
9752
9ee6e8bb 9753 op = (insn >> 21) & 0xf;
2af9ab77 9754 if (op == 6) {
62b44f05
AR
9755 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9756 goto illegal_op;
9757 }
2af9ab77
JB
9758 /* Halfword pack. */
9759 tmp = load_reg(s, rn);
9760 tmp2 = load_reg(s, rm);
9761 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9762 if (insn & (1 << 5)) {
9763 /* pkhtb */
d1f8755f 9764 if (shift == 0) {
2af9ab77 9765 shift = 31;
d1f8755f 9766 }
2af9ab77 9767 tcg_gen_sari_i32(tmp2, tmp2, shift);
d1f8755f 9768 tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
2af9ab77
JB
9769 } else {
9770 /* pkhbt */
d1f8755f
RH
9771 tcg_gen_shli_i32(tmp2, tmp2, shift);
9772 tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
2af9ab77 9773 }
7d1b0095 9774 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9775 store_reg(s, rd, tmp);
9776 } else {
2af9ab77
JB
9777 /* Data processing register constant shift. */
9778 if (rn == 15) {
7d1b0095 9779 tmp = tcg_temp_new_i32();
2af9ab77
JB
9780 tcg_gen_movi_i32(tmp, 0);
9781 } else {
9782 tmp = load_reg(s, rn);
9783 }
9784 tmp2 = load_reg(s, rm);
9785
9786 shiftop = (insn >> 4) & 3;
9787 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9788 conds = (insn & (1 << 20)) != 0;
9789 logic_cc = (conds && thumb2_logic_op(op));
9790 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9791 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9792 goto illegal_op;
7d1b0095 9793 tcg_temp_free_i32(tmp2);
55203189
PM
9794 if (rd == 13 &&
9795 ((op == 2 && rn == 15) ||
9796 (op == 8 && rn == 13) ||
9797 (op == 13 && rn == 13))) {
9798 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
9799 store_sp_checked(s, tmp);
9800 } else if (rd != 15) {
2af9ab77
JB
9801 store_reg(s, rd, tmp);
9802 } else {
7d1b0095 9803 tcg_temp_free_i32(tmp);
2af9ab77 9804 }
3174f8e9 9805 }
9ee6e8bb
PB
9806 break;
9807 case 13: /* Misc data processing. */
9808 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9809 if (op < 4 && (insn & 0xf000) != 0xf000)
9810 goto illegal_op;
9811 switch (op) {
9812 case 0: /* Register controlled shift. */
8984bd2e
PB
9813 tmp = load_reg(s, rn);
9814 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9815 if ((insn & 0x70) != 0)
9816 goto illegal_op;
a2d12f0f
PM
9817 /*
9818 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
9819 * - MOV, MOVS (register-shifted register), flagsetting
9820 */
9ee6e8bb 9821 op = (insn >> 21) & 3;
8984bd2e
PB
9822 logic_cc = (insn & (1 << 20)) != 0;
9823 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9824 if (logic_cc)
9825 gen_logic_CC(tmp);
bedb8a6b 9826 store_reg(s, rd, tmp);
9ee6e8bb
PB
9827 break;
9828 case 1: /* Sign/zero extend. */
62b44f05
AR
9829 op = (insn >> 20) & 7;
9830 switch (op) {
9831 case 0: /* SXTAH, SXTH */
9832 case 1: /* UXTAH, UXTH */
9833 case 4: /* SXTAB, SXTB */
9834 case 5: /* UXTAB, UXTB */
9835 break;
9836 case 2: /* SXTAB16, SXTB16 */
9837 case 3: /* UXTAB16, UXTB16 */
9838 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9839 goto illegal_op;
9840 }
9841 break;
9842 default:
9843 goto illegal_op;
9844 }
9845 if (rn != 15) {
9846 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9847 goto illegal_op;
9848 }
9849 }
5e3f878a 9850 tmp = load_reg(s, rm);
9ee6e8bb 9851 shift = (insn >> 4) & 3;
1301f322 9852 /* ??? In many cases it's not necessary to do a
9ee6e8bb 9853 rotate, a shift is sufficient. */
464eaa95 9854 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9855 op = (insn >> 20) & 7;
9856 switch (op) {
5e3f878a
PB
9857 case 0: gen_sxth(tmp); break;
9858 case 1: gen_uxth(tmp); break;
9859 case 2: gen_sxtb16(tmp); break;
9860 case 3: gen_uxtb16(tmp); break;
9861 case 4: gen_sxtb(tmp); break;
9862 case 5: gen_uxtb(tmp); break;
62b44f05
AR
9863 default:
9864 g_assert_not_reached();
9ee6e8bb
PB
9865 }
9866 if (rn != 15) {
5e3f878a 9867 tmp2 = load_reg(s, rn);
9ee6e8bb 9868 if ((op >> 1) == 1) {
5e3f878a 9869 gen_add16(tmp, tmp2);
9ee6e8bb 9870 } else {
5e3f878a 9871 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9872 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9873 }
9874 }
5e3f878a 9875 store_reg(s, rd, tmp);
9ee6e8bb
PB
9876 break;
9877 case 2: /* SIMD add/subtract. */
62b44f05
AR
9878 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9879 goto illegal_op;
9880 }
9ee6e8bb
PB
9881 op = (insn >> 20) & 7;
9882 shift = (insn >> 4) & 7;
9883 if ((op & 3) == 3 || (shift & 3) == 3)
9884 goto illegal_op;
6ddbc6e4
PB
9885 tmp = load_reg(s, rn);
9886 tmp2 = load_reg(s, rm);
9887 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9888 tcg_temp_free_i32(tmp2);
6ddbc6e4 9889 store_reg(s, rd, tmp);
9ee6e8bb
PB
9890 break;
9891 case 3: /* Other data processing. */
9892 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9893 if (op < 4) {
9894 /* Saturating add/subtract. */
62b44f05
AR
9895 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9896 goto illegal_op;
9897 }
d9ba4830
PB
9898 tmp = load_reg(s, rn);
9899 tmp2 = load_reg(s, rm);
9ee6e8bb 9900 if (op & 1)
640581a0 9901 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp);
4809c612 9902 if (op & 2)
9ef39277 9903 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9904 else
9ef39277 9905 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9906 tcg_temp_free_i32(tmp2);
9ee6e8bb 9907 } else {
62b44f05
AR
9908 switch (op) {
9909 case 0x0a: /* rbit */
9910 case 0x08: /* rev */
9911 case 0x09: /* rev16 */
9912 case 0x0b: /* revsh */
9913 case 0x18: /* clz */
9914 break;
9915 case 0x10: /* sel */
9916 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9917 goto illegal_op;
9918 }
9919 break;
9920 case 0x20: /* crc32/crc32c */
9921 case 0x21:
9922 case 0x22:
9923 case 0x28:
9924 case 0x29:
9925 case 0x2a:
962fcbf2 9926 if (!dc_isar_feature(aa32_crc32, s)) {
62b44f05
AR
9927 goto illegal_op;
9928 }
9929 break;
9930 default:
9931 goto illegal_op;
9932 }
d9ba4830 9933 tmp = load_reg(s, rn);
9ee6e8bb
PB
9934 switch (op) {
9935 case 0x0a: /* rbit */
d9ba4830 9936 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9937 break;
9938 case 0x08: /* rev */
66896cb8 9939 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
9940 break;
9941 case 0x09: /* rev16 */
d9ba4830 9942 gen_rev16(tmp);
9ee6e8bb
PB
9943 break;
9944 case 0x0b: /* revsh */
d9ba4830 9945 gen_revsh(tmp);
9ee6e8bb
PB
9946 break;
9947 case 0x10: /* sel */
d9ba4830 9948 tmp2 = load_reg(s, rm);
7d1b0095 9949 tmp3 = tcg_temp_new_i32();
0ecb72a5 9950 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 9951 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9952 tcg_temp_free_i32(tmp3);
9953 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9954 break;
9955 case 0x18: /* clz */
7539a012 9956 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 9957 break;
eb0ecd5a
WN
9958 case 0x20:
9959 case 0x21:
9960 case 0x22:
9961 case 0x28:
9962 case 0x29:
9963 case 0x2a:
9964 {
9965 /* crc32/crc32c */
9966 uint32_t sz = op & 0x3;
9967 uint32_t c = op & 0x8;
9968
eb0ecd5a 9969 tmp2 = load_reg(s, rm);
aa633469
PM
9970 if (sz == 0) {
9971 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9972 } else if (sz == 1) {
9973 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9974 }
eb0ecd5a
WN
9975 tmp3 = tcg_const_i32(1 << sz);
9976 if (c) {
9977 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9978 } else {
9979 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9980 }
9981 tcg_temp_free_i32(tmp2);
9982 tcg_temp_free_i32(tmp3);
9983 break;
9984 }
9ee6e8bb 9985 default:
62b44f05 9986 g_assert_not_reached();
9ee6e8bb
PB
9987 }
9988 }
d9ba4830 9989 store_reg(s, rd, tmp);
9ee6e8bb
PB
9990 break;
9991 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
9992 switch ((insn >> 20) & 7) {
9993 case 0: /* 32 x 32 -> 32 */
9994 case 7: /* Unsigned sum of absolute differences. */
9995 break;
9996 case 1: /* 16 x 16 -> 32 */
9997 case 2: /* Dual multiply add. */
9998 case 3: /* 32 * 16 -> 32msb */
9999 case 4: /* Dual multiply subtract. */
10000 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10001 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10002 goto illegal_op;
10003 }
10004 break;
10005 }
9ee6e8bb 10006 op = (insn >> 4) & 0xf;
d9ba4830
PB
10007 tmp = load_reg(s, rn);
10008 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10009 switch ((insn >> 20) & 7) {
10010 case 0: /* 32 x 32 -> 32 */
d9ba4830 10011 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10012 tcg_temp_free_i32(tmp2);
9ee6e8bb 10013 if (rs != 15) {
d9ba4830 10014 tmp2 = load_reg(s, rs);
9ee6e8bb 10015 if (op)
d9ba4830 10016 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10017 else
d9ba4830 10018 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10019 tcg_temp_free_i32(tmp2);
9ee6e8bb 10020 }
9ee6e8bb
PB
10021 break;
10022 case 1: /* 16 x 16 -> 32 */
d9ba4830 10023 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10024 tcg_temp_free_i32(tmp2);
9ee6e8bb 10025 if (rs != 15) {
d9ba4830 10026 tmp2 = load_reg(s, rs);
9ef39277 10027 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10028 tcg_temp_free_i32(tmp2);
9ee6e8bb 10029 }
9ee6e8bb
PB
10030 break;
10031 case 2: /* Dual multiply add. */
10032 case 4: /* Dual multiply subtract. */
10033 if (op)
d9ba4830
PB
10034 gen_swap_half(tmp2);
10035 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10036 if (insn & (1 << 22)) {
e1d177b9 10037 /* This subtraction cannot overflow. */
d9ba4830 10038 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10039 } else {
e1d177b9
PM
10040 /* This addition cannot overflow 32 bits;
10041 * however it may overflow considered as a signed
10042 * operation, in which case we must set the Q flag.
10043 */
9ef39277 10044 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10045 }
7d1b0095 10046 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10047 if (rs != 15)
10048 {
d9ba4830 10049 tmp2 = load_reg(s, rs);
9ef39277 10050 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10051 tcg_temp_free_i32(tmp2);
9ee6e8bb 10052 }
9ee6e8bb
PB
10053 break;
10054 case 3: /* 32 * 16 -> 32msb */
10055 if (op)
d9ba4830 10056 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10057 else
d9ba4830 10058 gen_sxth(tmp2);
a7812ae4
PB
10059 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10060 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10061 tmp = tcg_temp_new_i32();
ecc7b3aa 10062 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10063 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10064 if (rs != 15)
10065 {
d9ba4830 10066 tmp2 = load_reg(s, rs);
9ef39277 10067 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10068 tcg_temp_free_i32(tmp2);
9ee6e8bb 10069 }
9ee6e8bb 10070 break;
838fa72d 10071 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
5f8cd06e 10072 tcg_gen_muls2_i32(tmp2, tmp, tmp, tmp2);
9ee6e8bb 10073 if (rs != 15) {
5f8cd06e 10074 tmp3 = load_reg(s, rs);
838fa72d 10075 if (insn & (1 << 20)) {
5f8cd06e 10076 tcg_gen_add_i32(tmp, tmp, tmp3);
99c475ab 10077 } else {
5f8cd06e 10078 tcg_gen_sub_i32(tmp, tmp, tmp3);
99c475ab 10079 }
5f8cd06e 10080 tcg_temp_free_i32(tmp3);
2c0262af 10081 }
838fa72d 10082 if (insn & (1 << 4)) {
5f8cd06e
RH
10083 /*
10084 * Adding 0x80000000 to the 64-bit quantity
10085 * means that we have carry in to the high
10086 * word when the low word has the high bit set.
10087 */
10088 tcg_gen_shri_i32(tmp2, tmp2, 31);
10089 tcg_gen_add_i32(tmp, tmp, tmp2);
838fa72d 10090 }
5f8cd06e 10091 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10092 break;
10093 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10094 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10095 tcg_temp_free_i32(tmp2);
9ee6e8bb 10096 if (rs != 15) {
d9ba4830
PB
10097 tmp2 = load_reg(s, rs);
10098 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10099 tcg_temp_free_i32(tmp2);
5fd46862 10100 }
9ee6e8bb 10101 break;
2c0262af 10102 }
d9ba4830 10103 store_reg(s, rd, tmp);
2c0262af 10104 break;
9ee6e8bb
PB
10105 case 6: case 7: /* 64-bit multiply, Divide. */
10106 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10107 tmp = load_reg(s, rn);
10108 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10109 if ((op & 0x50) == 0x10) {
10110 /* sdiv, udiv */
7e0cf8b4 10111 if (!dc_isar_feature(thumb_div, s)) {
9ee6e8bb 10112 goto illegal_op;
47789990 10113 }
9ee6e8bb 10114 if (op & 0x20)
5e3f878a 10115 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10116 else
5e3f878a 10117 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10118 tcg_temp_free_i32(tmp2);
5e3f878a 10119 store_reg(s, rd, tmp);
9ee6e8bb
PB
10120 } else if ((op & 0xe) == 0xc) {
10121 /* Dual multiply accumulate long. */
62b44f05
AR
10122 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10123 tcg_temp_free_i32(tmp);
10124 tcg_temp_free_i32(tmp2);
10125 goto illegal_op;
10126 }
9ee6e8bb 10127 if (op & 1)
5e3f878a
PB
10128 gen_swap_half(tmp2);
10129 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10130 if (op & 0x10) {
5e3f878a 10131 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10132 } else {
5e3f878a 10133 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10134 }
7d1b0095 10135 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10136 /* BUGFIX */
10137 tmp64 = tcg_temp_new_i64();
10138 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10139 tcg_temp_free_i32(tmp);
a7812ae4
PB
10140 gen_addq(s, tmp64, rs, rd);
10141 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10142 tcg_temp_free_i64(tmp64);
2c0262af 10143 } else {
9ee6e8bb
PB
10144 if (op & 0x20) {
10145 /* Unsigned 64-bit multiply */
a7812ae4 10146 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10147 } else {
9ee6e8bb
PB
10148 if (op & 8) {
10149 /* smlalxy */
62b44f05
AR
10150 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10151 tcg_temp_free_i32(tmp2);
10152 tcg_temp_free_i32(tmp);
10153 goto illegal_op;
10154 }
5e3f878a 10155 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10156 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10157 tmp64 = tcg_temp_new_i64();
10158 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10159 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10160 } else {
10161 /* Signed 64-bit multiply */
a7812ae4 10162 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10163 }
b5ff1b31 10164 }
9ee6e8bb
PB
10165 if (op & 4) {
10166 /* umaal */
62b44f05
AR
10167 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10168 tcg_temp_free_i64(tmp64);
10169 goto illegal_op;
10170 }
a7812ae4
PB
10171 gen_addq_lo(s, tmp64, rs);
10172 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10173 } else if (op & 0x40) {
10174 /* 64-bit accumulate. */
a7812ae4 10175 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10176 }
a7812ae4 10177 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10178 tcg_temp_free_i64(tmp64);
5fd46862 10179 }
2c0262af 10180 break;
9ee6e8bb
PB
10181 }
10182 break;
10183 case 6: case 7: case 14: case 15:
10184 /* Coprocessor. */
7517748e 10185 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8859ba3c
PM
10186 /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
10187 if (extract32(insn, 24, 2) == 3) {
10188 goto illegal_op; /* op0 = 0b11 : unallocated */
10189 }
10190
10191 /*
10192 * Decode VLLDM and VLSTM first: these are nonstandard because:
10193 * * if there is no FPU then these insns must NOP in
10194 * Secure state and UNDEF in Nonsecure state
10195 * * if there is an FPU then these insns do not have
10196 * the usual behaviour that disas_vfp_insn() provides of
10197 * being controlled by CPACR/NSACR enable bits or the
10198 * lazy-stacking logic.
7517748e 10199 */
b1e5336a
PM
10200 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10201 (insn & 0xffa00f00) == 0xec200a00) {
10202 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
10203 * - VLLDM, VLSTM
10204 * We choose to UNDEF if the RAZ bits are non-zero.
10205 */
10206 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
10207 goto illegal_op;
10208 }
019076b0
PM
10209
10210 if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
10211 TCGv_i32 fptr = load_reg(s, rn);
10212
10213 if (extract32(insn, 20, 1)) {
956fe143 10214 gen_helper_v7m_vlldm(cpu_env, fptr);
019076b0
PM
10215 } else {
10216 gen_helper_v7m_vlstm(cpu_env, fptr);
10217 }
10218 tcg_temp_free_i32(fptr);
10219
10220 /* End the TB, because we have updated FP control bits */
10221 s->base.is_jmp = DISAS_UPDATE;
10222 }
b1e5336a
PM
10223 break;
10224 }
8859ba3c
PM
10225 if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
10226 ((insn >> 8) & 0xe) == 10) {
10227 /* FP, and the CPU supports it */
10228 if (disas_vfp_insn(s, insn)) {
10229 goto illegal_op;
10230 }
10231 break;
10232 }
10233
b1e5336a 10234 /* All other insns: NOCP */
a767fac8 10235 gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
7517748e
PM
10236 default_exception_el(s));
10237 break;
10238 }
0052087e
RH
10239 if ((insn & 0xfe000a00) == 0xfc000800
10240 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10241 /* The Thumb2 and ARM encodings are identical. */
10242 if (disas_neon_insn_3same_ext(s, insn)) {
10243 goto illegal_op;
10244 }
10245 } else if ((insn & 0xff000a00) == 0xfe000800
10246 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10247 /* The Thumb2 and ARM encodings are identical. */
10248 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
10249 goto illegal_op;
10250 }
10251 } else if (((insn >> 24) & 3) == 3) {
9ee6e8bb 10252 /* Translate into the equivalent ARM encoding. */
f06053e3 10253 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10254 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10255 goto illegal_op;
7dcc1f89 10256 }
6a57f3eb 10257 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10258 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10259 goto illegal_op;
10260 }
9ee6e8bb
PB
10261 } else {
10262 if (insn & (1 << 28))
10263 goto illegal_op;
7dcc1f89 10264 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10265 goto illegal_op;
7dcc1f89 10266 }
9ee6e8bb
PB
10267 }
10268 break;
10269 case 8: case 9: case 10: case 11:
10270 if (insn & (1 << 15)) {
10271 /* Branches, misc control. */
10272 if (insn & 0x5000) {
10273 /* Unconditional branch. */
10274 /* signextend(hw1[10:0]) -> offset[:12]. */
10275 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10276 /* hw1[10:0] -> offset[11:1]. */
10277 offset |= (insn & 0x7ff) << 1;
10278 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10279 offset[24:22] already have the same value because of the
10280 sign extension above. */
10281 offset ^= ((~insn) & (1 << 13)) << 10;
10282 offset ^= ((~insn) & (1 << 11)) << 11;
10283
9ee6e8bb
PB
10284 if (insn & (1 << 14)) {
10285 /* Branch and link. */
a0415916 10286 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
b5ff1b31 10287 }
3b46e624 10288
fdbcf632 10289 offset += read_pc(s);
9ee6e8bb
PB
10290 if (insn & (1 << 12)) {
10291 /* b/bl */
b0109805 10292 gen_jmp(s, offset);
9ee6e8bb
PB
10293 } else {
10294 /* blx */
b0109805 10295 offset &= ~(uint32_t)2;
be5e7a76 10296 /* thumb2 bx, no need to check */
b0109805 10297 gen_bx_im(s, offset);
2c0262af 10298 }
9ee6e8bb
PB
10299 } else if (((insn >> 23) & 7) == 7) {
10300 /* Misc control */
10301 if (insn & (1 << 13))
10302 goto illegal_op;
10303
10304 if (insn & (1 << 26)) {
001b3cab
PM
10305 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10306 goto illegal_op;
10307 }
37e6456e
PM
10308 if (!(insn & (1 << 20))) {
10309 /* Hypervisor call (v7) */
10310 int imm16 = extract32(insn, 16, 4) << 12
10311 | extract32(insn, 0, 12);
10312 ARCH(7);
10313 if (IS_USER(s)) {
10314 goto illegal_op;
10315 }
10316 gen_hvc(s, imm16);
10317 } else {
10318 /* Secure monitor call (v6+) */
10319 ARCH(6K);
10320 if (IS_USER(s)) {
10321 goto illegal_op;
10322 }
10323 gen_smc(s);
10324 }
2c0262af 10325 } else {
9ee6e8bb
PB
10326 op = (insn >> 20) & 7;
10327 switch (op) {
10328 case 0: /* msr cpsr. */
b53d8923 10329 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 10330 tmp = load_reg(s, rn);
b28b3377
PM
10331 /* the constant is the mask and SYSm fields */
10332 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 10333 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10334 tcg_temp_free_i32(addr);
7d1b0095 10335 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10336 gen_lookup_tb(s);
10337 break;
10338 }
10339 /* fall through */
10340 case 1: /* msr spsr. */
b53d8923 10341 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10342 goto illegal_op;
b53d8923 10343 }
8bfd0550
PM
10344
10345 if (extract32(insn, 5, 1)) {
10346 /* MSR (banked) */
10347 int sysm = extract32(insn, 8, 4) |
10348 (extract32(insn, 4, 1) << 4);
10349 int r = op & 1;
10350
10351 gen_msr_banked(s, r, sysm, rm);
10352 break;
10353 }
10354
10355 /* MSR (for PSRs) */
2fbac54b
FN
10356 tmp = load_reg(s, rn);
10357 if (gen_set_psr(s,
7dcc1f89 10358 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10359 op == 1, tmp))
9ee6e8bb
PB
10360 goto illegal_op;
10361 break;
10362 case 2: /* cps, nop-hint. */
10363 if (((insn >> 8) & 7) == 0) {
10364 gen_nop_hint(s, insn & 0xff);
10365 }
10366 /* Implemented as NOP in user mode. */
10367 if (IS_USER(s))
10368 break;
10369 offset = 0;
10370 imm = 0;
10371 if (insn & (1 << 10)) {
10372 if (insn & (1 << 7))
10373 offset |= CPSR_A;
10374 if (insn & (1 << 6))
10375 offset |= CPSR_I;
10376 if (insn & (1 << 5))
10377 offset |= CPSR_F;
10378 if (insn & (1 << 9))
10379 imm = CPSR_A | CPSR_I | CPSR_F;
10380 }
10381 if (insn & (1 << 8)) {
10382 offset |= 0x1f;
10383 imm |= (insn & 0x1f);
10384 }
10385 if (offset) {
2fbac54b 10386 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10387 }
10388 break;
10389 case 3: /* Special control operations. */
14120108 10390 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
8297cb13 10391 !arm_dc_feature(s, ARM_FEATURE_M)) {
14120108
JS
10392 goto illegal_op;
10393 }
9ee6e8bb
PB
10394 op = (insn >> 4) & 0xf;
10395 switch (op) {
10396 case 2: /* clrex */
426f5abc 10397 gen_clrex(s);
9ee6e8bb
PB
10398 break;
10399 case 4: /* dsb */
10400 case 5: /* dmb */
61e4c432 10401 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10402 break;
6df99dec
SS
10403 case 6: /* isb */
10404 /* We need to break the TB after this insn
10405 * to execute self-modifying code correctly
10406 * and also to take any pending interrupts
10407 * immediately.
10408 */
a0415916 10409 gen_goto_tb(s, 0, s->base.pc_next);
6df99dec 10410 break;
9888bd1e
RH
10411 case 7: /* sb */
10412 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
10413 goto illegal_op;
10414 }
10415 /*
10416 * TODO: There is no speculation barrier opcode
10417 * for TCG; MB and end the TB instead.
10418 */
10419 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
a0415916 10420 gen_goto_tb(s, 0, s->base.pc_next);
9888bd1e 10421 break;
9ee6e8bb
PB
10422 default:
10423 goto illegal_op;
10424 }
10425 break;
10426 case 4: /* bxj */
9d7c59c8
PM
10427 /* Trivial implementation equivalent to bx.
10428 * This instruction doesn't exist at all for M-profile.
10429 */
10430 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10431 goto illegal_op;
10432 }
d9ba4830
PB
10433 tmp = load_reg(s, rn);
10434 gen_bx(s, tmp);
9ee6e8bb
PB
10435 break;
10436 case 5: /* Exception return. */
b8b45b68
RV
10437 if (IS_USER(s)) {
10438 goto illegal_op;
10439 }
10440 if (rn != 14 || rd != 15) {
10441 goto illegal_op;
10442 }
55c544ed
PM
10443 if (s->current_el == 2) {
10444 /* ERET from Hyp uses ELR_Hyp, not LR */
10445 if (insn & 0xff) {
10446 goto illegal_op;
10447 }
10448 tmp = load_cpu_field(elr_el[2]);
10449 } else {
10450 tmp = load_reg(s, rn);
10451 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10452 }
b8b45b68
RV
10453 gen_exception_return(s, tmp);
10454 break;
8bfd0550 10455 case 6: /* MRS */
43ac6574
PM
10456 if (extract32(insn, 5, 1) &&
10457 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10458 /* MRS (banked) */
10459 int sysm = extract32(insn, 16, 4) |
10460 (extract32(insn, 4, 1) << 4);
10461
10462 gen_mrs_banked(s, 0, sysm, rd);
10463 break;
10464 }
10465
3d54026f
PM
10466 if (extract32(insn, 16, 4) != 0xf) {
10467 goto illegal_op;
10468 }
10469 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10470 extract32(insn, 0, 8) != 0) {
10471 goto illegal_op;
10472 }
10473
8bfd0550 10474 /* mrs cpsr */
7d1b0095 10475 tmp = tcg_temp_new_i32();
b53d8923 10476 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10477 addr = tcg_const_i32(insn & 0xff);
10478 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10479 tcg_temp_free_i32(addr);
9ee6e8bb 10480 } else {
9ef39277 10481 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10482 }
8984bd2e 10483 store_reg(s, rd, tmp);
9ee6e8bb 10484 break;
8bfd0550 10485 case 7: /* MRS */
43ac6574
PM
10486 if (extract32(insn, 5, 1) &&
10487 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10488 /* MRS (banked) */
10489 int sysm = extract32(insn, 16, 4) |
10490 (extract32(insn, 4, 1) << 4);
10491
10492 gen_mrs_banked(s, 1, sysm, rd);
10493 break;
10494 }
10495
10496 /* mrs spsr. */
9ee6e8bb 10497 /* Not accessible in user mode. */
b53d8923 10498 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10499 goto illegal_op;
b53d8923 10500 }
3d54026f
PM
10501
10502 if (extract32(insn, 16, 4) != 0xf ||
10503 extract32(insn, 0, 8) != 0) {
10504 goto illegal_op;
10505 }
10506
d9ba4830
PB
10507 tmp = load_cpu_field(spsr);
10508 store_reg(s, rd, tmp);
9ee6e8bb 10509 break;
2c0262af
FB
10510 }
10511 }
9ee6e8bb
PB
10512 } else {
10513 /* Conditional branch. */
10514 op = (insn >> 22) & 0xf;
10515 /* Generate a conditional jump to next instruction. */
c2d9644e 10516 arm_skip_unless(s, op);
9ee6e8bb
PB
10517
10518 /* offset[11:1] = insn[10:0] */
10519 offset = (insn & 0x7ff) << 1;
10520 /* offset[17:12] = insn[21:16]. */
10521 offset |= (insn & 0x003f0000) >> 4;
10522 /* offset[31:20] = insn[26]. */
10523 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10524 /* offset[18] = insn[13]. */
10525 offset |= (insn & (1 << 13)) << 5;
10526 /* offset[19] = insn[11]. */
10527 offset |= (insn & (1 << 11)) << 8;
10528
10529 /* jump to the offset */
fdbcf632 10530 gen_jmp(s, read_pc(s) + offset);
9ee6e8bb
PB
10531 }
10532 } else {
55203189
PM
10533 /*
10534 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
10535 * - Data-processing (modified immediate, plain binary immediate)
10536 */
9ee6e8bb 10537 if (insn & (1 << 25)) {
55203189
PM
10538 /*
10539 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
10540 * - Data-processing (plain binary immediate)
10541 */
9ee6e8bb
PB
10542 if (insn & (1 << 24)) {
10543 if (insn & (1 << 20))
10544 goto illegal_op;
10545 /* Bitfield/Saturate. */
10546 op = (insn >> 21) & 7;
10547 imm = insn & 0x1f;
10548 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10549 if (rn == 15) {
7d1b0095 10550 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10551 tcg_gen_movi_i32(tmp, 0);
10552 } else {
10553 tmp = load_reg(s, rn);
10554 }
9ee6e8bb
PB
10555 switch (op) {
10556 case 2: /* Signed bitfield extract. */
10557 imm++;
10558 if (shift + imm > 32)
10559 goto illegal_op;
59a71b4c
RH
10560 if (imm < 32) {
10561 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10562 }
9ee6e8bb
PB
10563 break;
10564 case 6: /* Unsigned bitfield extract. */
10565 imm++;
10566 if (shift + imm > 32)
10567 goto illegal_op;
59a71b4c
RH
10568 if (imm < 32) {
10569 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10570 }
9ee6e8bb
PB
10571 break;
10572 case 3: /* Bitfield insert/clear. */
10573 if (imm < shift)
10574 goto illegal_op;
10575 imm = imm + 1 - shift;
10576 if (imm != 32) {
6ddbc6e4 10577 tmp2 = load_reg(s, rd);
d593c48e 10578 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10579 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10580 }
10581 break;
10582 case 7:
10583 goto illegal_op;
10584 default: /* Saturate. */
464eaa95
RH
10585 if (op & 1) {
10586 tcg_gen_sari_i32(tmp, tmp, shift);
10587 } else {
10588 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10589 }
6ddbc6e4 10590 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10591 if (op & 4) {
10592 /* Unsigned. */
62b44f05
AR
10593 if ((op & 1) && shift == 0) {
10594 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10595 tcg_temp_free_i32(tmp);
10596 tcg_temp_free_i32(tmp2);
10597 goto illegal_op;
10598 }
9ef39277 10599 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10600 } else {
9ef39277 10601 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10602 }
2c0262af 10603 } else {
9ee6e8bb 10604 /* Signed. */
62b44f05
AR
10605 if ((op & 1) && shift == 0) {
10606 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10607 tcg_temp_free_i32(tmp);
10608 tcg_temp_free_i32(tmp2);
10609 goto illegal_op;
10610 }
9ef39277 10611 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10612 } else {
9ef39277 10613 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10614 }
2c0262af 10615 }
b75263d6 10616 tcg_temp_free_i32(tmp2);
9ee6e8bb 10617 break;
2c0262af 10618 }
6ddbc6e4 10619 store_reg(s, rd, tmp);
9ee6e8bb
PB
10620 } else {
10621 imm = ((insn & 0x04000000) >> 15)
10622 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10623 if (insn & (1 << 22)) {
10624 /* 16-bit immediate. */
10625 imm |= (insn >> 4) & 0xf000;
10626 if (insn & (1 << 23)) {
10627 /* movt */
5e3f878a 10628 tmp = load_reg(s, rd);
86831435 10629 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10630 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10631 } else {
9ee6e8bb 10632 /* movw */
7d1b0095 10633 tmp = tcg_temp_new_i32();
5e3f878a 10634 tcg_gen_movi_i32(tmp, imm);
2c0262af 10635 }
55203189 10636 store_reg(s, rd, tmp);
2c0262af 10637 } else {
9ee6e8bb 10638 /* Add/sub 12-bit immediate. */
16e0d823
RH
10639 if (insn & (1 << 23)) {
10640 imm = -imm;
10641 }
10642 tmp = add_reg_for_lit(s, rn, imm);
10643 if (rn == 13 && rd == 13) {
10644 /* ADD SP, SP, imm or SUB SP, SP, imm */
10645 store_sp_checked(s, tmp);
2c0262af 10646 } else {
16e0d823 10647 store_reg(s, rd, tmp);
2c0262af 10648 }
9ee6e8bb 10649 }
191abaa2 10650 }
9ee6e8bb 10651 } else {
55203189
PM
10652 /*
10653 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
10654 * - Data-processing (modified immediate)
10655 */
9ee6e8bb
PB
10656 int shifter_out = 0;
10657 /* modified 12-bit immediate. */
10658 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10659 imm = (insn & 0xff);
10660 switch (shift) {
10661 case 0: /* XY */
10662 /* Nothing to do. */
10663 break;
10664 case 1: /* 00XY00XY */
10665 imm |= imm << 16;
10666 break;
10667 case 2: /* XY00XY00 */
10668 imm |= imm << 16;
10669 imm <<= 8;
10670 break;
10671 case 3: /* XYXYXYXY */
10672 imm |= imm << 16;
10673 imm |= imm << 8;
10674 break;
10675 default: /* Rotated constant. */
10676 shift = (shift << 1) | (imm >> 7);
10677 imm |= 0x80;
10678 imm = imm << (32 - shift);
10679 shifter_out = 1;
10680 break;
b5ff1b31 10681 }
7d1b0095 10682 tmp2 = tcg_temp_new_i32();
3174f8e9 10683 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10684 rn = (insn >> 16) & 0xf;
3174f8e9 10685 if (rn == 15) {
7d1b0095 10686 tmp = tcg_temp_new_i32();
3174f8e9
FN
10687 tcg_gen_movi_i32(tmp, 0);
10688 } else {
10689 tmp = load_reg(s, rn);
10690 }
9ee6e8bb
PB
10691 op = (insn >> 21) & 0xf;
10692 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10693 shifter_out, tmp, tmp2))
9ee6e8bb 10694 goto illegal_op;
7d1b0095 10695 tcg_temp_free_i32(tmp2);
9ee6e8bb 10696 rd = (insn >> 8) & 0xf;
55203189
PM
10697 if (rd == 13 && rn == 13
10698 && (op == 8 || op == 13)) {
10699 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
10700 store_sp_checked(s, tmp);
10701 } else if (rd != 15) {
3174f8e9
FN
10702 store_reg(s, rd, tmp);
10703 } else {
7d1b0095 10704 tcg_temp_free_i32(tmp);
2c0262af 10705 }
2c0262af 10706 }
9ee6e8bb
PB
10707 }
10708 break;
10709 case 12: /* Load/store single data item. */
10710 {
10711 int postinc = 0;
10712 int writeback = 0;
a99caa48 10713 int memidx;
9bb6558a
PM
10714 ISSInfo issinfo;
10715
9ee6e8bb 10716 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10717 if (disas_neon_ls_insn(s, insn)) {
c1713132 10718 goto illegal_op;
7dcc1f89 10719 }
9ee6e8bb
PB
10720 break;
10721 }
a2fdc890
PM
10722 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10723 if (rs == 15) {
10724 if (!(insn & (1 << 20))) {
10725 goto illegal_op;
10726 }
10727 if (op != 2) {
10728 /* Byte or halfword load space with dest == r15 : memory hints.
10729 * Catch them early so we don't emit pointless addressing code.
10730 * This space is a mix of:
10731 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10732 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10733 * cores)
10734 * unallocated hints, which must be treated as NOPs
10735 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10736 * which is easiest for the decoding logic
10737 * Some space which must UNDEF
10738 */
10739 int op1 = (insn >> 23) & 3;
10740 int op2 = (insn >> 6) & 0x3f;
10741 if (op & 2) {
10742 goto illegal_op;
10743 }
10744 if (rn == 15) {
02afbf64
PM
10745 /* UNPREDICTABLE, unallocated hint or
10746 * PLD/PLDW/PLI (literal)
10747 */
2eea841c 10748 return;
a2fdc890
PM
10749 }
10750 if (op1 & 1) {
2eea841c 10751 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10752 }
10753 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 10754 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10755 }
10756 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 10757 goto illegal_op;
a2fdc890
PM
10758 }
10759 }
a99caa48 10760 memidx = get_mem_index(s);
16e0d823
RH
10761 imm = insn & 0xfff;
10762 if (insn & (1 << 23)) {
10763 /* PC relative or Positive offset. */
10764 addr = add_reg_for_lit(s, rn, imm);
10765 } else if (rn == 15) {
10766 /* PC relative with negative offset. */
10767 addr = add_reg_for_lit(s, rn, -imm);
9ee6e8bb 10768 } else {
b0109805 10769 addr = load_reg(s, rn);
16e0d823
RH
10770 imm = insn & 0xff;
10771 switch ((insn >> 8) & 0xf) {
10772 case 0x0: /* Shifted Register. */
10773 shift = (insn >> 4) & 0xf;
10774 if (shift > 3) {
2a0308c5 10775 tcg_temp_free_i32(addr);
b7bcbe95 10776 goto illegal_op;
9ee6e8bb 10777 }
16e0d823 10778 tmp = load_reg(s, rm);
464eaa95 10779 tcg_gen_shli_i32(tmp, tmp, shift);
16e0d823
RH
10780 tcg_gen_add_i32(addr, addr, tmp);
10781 tcg_temp_free_i32(tmp);
10782 break;
10783 case 0xc: /* Negative offset. */
10784 tcg_gen_addi_i32(addr, addr, -imm);
10785 break;
10786 case 0xe: /* User privilege. */
10787 tcg_gen_addi_i32(addr, addr, imm);
10788 memidx = get_a32_user_mem_index(s);
10789 break;
10790 case 0x9: /* Post-decrement. */
10791 imm = -imm;
10792 /* Fall through. */
10793 case 0xb: /* Post-increment. */
10794 postinc = 1;
10795 writeback = 1;
10796 break;
10797 case 0xd: /* Pre-decrement. */
10798 imm = -imm;
10799 /* Fall through. */
10800 case 0xf: /* Pre-increment. */
10801 writeback = 1;
10802 break;
10803 default:
10804 tcg_temp_free_i32(addr);
10805 goto illegal_op;
9ee6e8bb
PB
10806 }
10807 }
9bb6558a
PM
10808
10809 issinfo = writeback ? ISSInvalid : rs;
10810
0bc003ba
PM
10811 if (s->v8m_stackcheck && rn == 13 && writeback) {
10812 /*
10813 * Stackcheck. Here we know 'addr' is the current SP;
10814 * if imm is +ve we're moving SP up, else down. It is
10815 * UNKNOWN whether the limit check triggers when SP starts
10816 * below the limit and ends up above it; we chose to do so.
10817 */
10818 if ((int32_t)imm < 0) {
10819 TCGv_i32 newsp = tcg_temp_new_i32();
10820
10821 tcg_gen_addi_i32(newsp, addr, imm);
10822 gen_helper_v8m_stackcheck(cpu_env, newsp);
10823 tcg_temp_free_i32(newsp);
10824 } else {
10825 gen_helper_v8m_stackcheck(cpu_env, addr);
10826 }
10827 }
10828
10829 if (writeback && !postinc) {
10830 tcg_gen_addi_i32(addr, addr, imm);
10831 }
10832
9ee6e8bb
PB
10833 if (insn & (1 << 20)) {
10834 /* Load. */
5a839c0d 10835 tmp = tcg_temp_new_i32();
a2fdc890 10836 switch (op) {
5a839c0d 10837 case 0:
9bb6558a 10838 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10839 break;
10840 case 4:
9bb6558a 10841 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10842 break;
10843 case 1:
9bb6558a 10844 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10845 break;
10846 case 5:
9bb6558a 10847 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10848 break;
10849 case 2:
9bb6558a 10850 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10851 break;
2a0308c5 10852 default:
5a839c0d 10853 tcg_temp_free_i32(tmp);
2a0308c5
PM
10854 tcg_temp_free_i32(addr);
10855 goto illegal_op;
a2fdc890
PM
10856 }
10857 if (rs == 15) {
3bb8a96f 10858 gen_bx_excret(s, tmp);
9ee6e8bb 10859 } else {
a2fdc890 10860 store_reg(s, rs, tmp);
9ee6e8bb
PB
10861 }
10862 } else {
10863 /* Store. */
b0109805 10864 tmp = load_reg(s, rs);
9ee6e8bb 10865 switch (op) {
5a839c0d 10866 case 0:
9bb6558a 10867 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10868 break;
10869 case 1:
9bb6558a 10870 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10871 break;
10872 case 2:
9bb6558a 10873 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10874 break;
2a0308c5 10875 default:
5a839c0d 10876 tcg_temp_free_i32(tmp);
2a0308c5
PM
10877 tcg_temp_free_i32(addr);
10878 goto illegal_op;
b7bcbe95 10879 }
5a839c0d 10880 tcg_temp_free_i32(tmp);
2c0262af 10881 }
9ee6e8bb 10882 if (postinc)
b0109805
PB
10883 tcg_gen_addi_i32(addr, addr, imm);
10884 if (writeback) {
10885 store_reg(s, rn, addr);
10886 } else {
7d1b0095 10887 tcg_temp_free_i32(addr);
b0109805 10888 }
9ee6e8bb
PB
10889 }
10890 break;
10891 default:
10892 goto illegal_op;
2c0262af 10893 }
2eea841c 10894 return;
9ee6e8bb 10895illegal_op:
1ce21ba1 10896 unallocated_encoding(s);
2c0262af
FB
10897}
10898
296e5a0a 10899static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 10900{
296e5a0a 10901 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
10902 int32_t offset;
10903 int i;
39d5492a
PM
10904 TCGv_i32 tmp;
10905 TCGv_i32 tmp2;
10906 TCGv_i32 addr;
99c475ab 10907
99c475ab
FB
10908 switch (insn >> 12) {
10909 case 0: case 1:
396e467c 10910
99c475ab
FB
10911 rd = insn & 7;
10912 op = (insn >> 11) & 3;
10913 if (op == 3) {
a2d12f0f
PM
10914 /*
10915 * 0b0001_1xxx_xxxx_xxxx
10916 * - Add, subtract (three low registers)
10917 * - Add, subtract (two low registers and immediate)
10918 */
99c475ab 10919 rn = (insn >> 3) & 7;
396e467c 10920 tmp = load_reg(s, rn);
99c475ab
FB
10921 if (insn & (1 << 10)) {
10922 /* immediate */
7d1b0095 10923 tmp2 = tcg_temp_new_i32();
396e467c 10924 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10925 } else {
10926 /* reg */
10927 rm = (insn >> 6) & 7;
396e467c 10928 tmp2 = load_reg(s, rm);
99c475ab 10929 }
9ee6e8bb
PB
10930 if (insn & (1 << 9)) {
10931 if (s->condexec_mask)
396e467c 10932 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10933 else
72485ec4 10934 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10935 } else {
10936 if (s->condexec_mask)
396e467c 10937 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10938 else
72485ec4 10939 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10940 }
7d1b0095 10941 tcg_temp_free_i32(tmp2);
396e467c 10942 store_reg(s, rd, tmp);
99c475ab
FB
10943 } else {
10944 /* shift immediate */
10945 rm = (insn >> 3) & 7;
10946 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10947 tmp = load_reg(s, rm);
10948 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10949 if (!s->condexec_mask)
10950 gen_logic_CC(tmp);
10951 store_reg(s, rd, tmp);
99c475ab
FB
10952 }
10953 break;
10954 case 2: case 3:
a2d12f0f
PM
10955 /*
10956 * 0b001x_xxxx_xxxx_xxxx
10957 * - Add, subtract, compare, move (one low register and immediate)
10958 */
99c475ab
FB
10959 op = (insn >> 11) & 3;
10960 rd = (insn >> 8) & 0x7;
396e467c 10961 if (op == 0) { /* mov */
7d1b0095 10962 tmp = tcg_temp_new_i32();
396e467c 10963 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10964 if (!s->condexec_mask)
396e467c
FN
10965 gen_logic_CC(tmp);
10966 store_reg(s, rd, tmp);
10967 } else {
10968 tmp = load_reg(s, rd);
7d1b0095 10969 tmp2 = tcg_temp_new_i32();
396e467c
FN
10970 tcg_gen_movi_i32(tmp2, insn & 0xff);
10971 switch (op) {
10972 case 1: /* cmp */
72485ec4 10973 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10974 tcg_temp_free_i32(tmp);
10975 tcg_temp_free_i32(tmp2);
396e467c
FN
10976 break;
10977 case 2: /* add */
10978 if (s->condexec_mask)
10979 tcg_gen_add_i32(tmp, tmp, tmp2);
10980 else
72485ec4 10981 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10982 tcg_temp_free_i32(tmp2);
396e467c
FN
10983 store_reg(s, rd, tmp);
10984 break;
10985 case 3: /* sub */
10986 if (s->condexec_mask)
10987 tcg_gen_sub_i32(tmp, tmp, tmp2);
10988 else
72485ec4 10989 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10990 tcg_temp_free_i32(tmp2);
396e467c
FN
10991 store_reg(s, rd, tmp);
10992 break;
10993 }
99c475ab 10994 }
99c475ab
FB
10995 break;
10996 case 4:
10997 if (insn & (1 << 11)) {
10998 rd = (insn >> 8) & 7;
5899f386 10999 /* load pc-relative. Bit 1 of PC is ignored. */
16e0d823 11000 addr = add_reg_for_lit(s, 15, (insn & 0xff) * 4);
c40c8556 11001 tmp = tcg_temp_new_i32();
9bb6558a
PM
11002 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11003 rd | ISSIs16Bit);
7d1b0095 11004 tcg_temp_free_i32(addr);
b0109805 11005 store_reg(s, rd, tmp);
99c475ab
FB
11006 break;
11007 }
11008 if (insn & (1 << 10)) {
ebfe27c5
PM
11009 /* 0b0100_01xx_xxxx_xxxx
11010 * - data processing extended, branch and exchange
11011 */
99c475ab
FB
11012 rd = (insn & 7) | ((insn >> 4) & 8);
11013 rm = (insn >> 3) & 0xf;
11014 op = (insn >> 8) & 3;
11015 switch (op) {
11016 case 0: /* add */
396e467c
FN
11017 tmp = load_reg(s, rd);
11018 tmp2 = load_reg(s, rm);
11019 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11020 tcg_temp_free_i32(tmp2);
55203189
PM
11021 if (rd == 13) {
11022 /* ADD SP, SP, reg */
11023 store_sp_checked(s, tmp);
11024 } else {
11025 store_reg(s, rd, tmp);
11026 }
99c475ab
FB
11027 break;
11028 case 1: /* cmp */
396e467c
FN
11029 tmp = load_reg(s, rd);
11030 tmp2 = load_reg(s, rm);
72485ec4 11031 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11032 tcg_temp_free_i32(tmp2);
11033 tcg_temp_free_i32(tmp);
99c475ab
FB
11034 break;
11035 case 2: /* mov/cpy */
396e467c 11036 tmp = load_reg(s, rm);
55203189
PM
11037 if (rd == 13) {
11038 /* MOV SP, reg */
11039 store_sp_checked(s, tmp);
11040 } else {
11041 store_reg(s, rd, tmp);
11042 }
99c475ab 11043 break;
ebfe27c5
PM
11044 case 3:
11045 {
11046 /* 0b0100_0111_xxxx_xxxx
11047 * - branch [and link] exchange thumb register
11048 */
11049 bool link = insn & (1 << 7);
11050
fb602cb7 11051 if (insn & 3) {
ebfe27c5
PM
11052 goto undef;
11053 }
11054 if (link) {
be5e7a76 11055 ARCH(5);
ebfe27c5 11056 }
fb602cb7
PM
11057 if ((insn & 4)) {
11058 /* BXNS/BLXNS: only exists for v8M with the
11059 * security extensions, and always UNDEF if NonSecure.
11060 * We don't implement these in the user-only mode
11061 * either (in theory you can use them from Secure User
11062 * mode but they are too tied in to system emulation.)
11063 */
11064 if (!s->v8m_secure || IS_USER_ONLY) {
11065 goto undef;
11066 }
11067 if (link) {
3e3fa230 11068 gen_blxns(s, rm);
fb602cb7
PM
11069 } else {
11070 gen_bxns(s, rm);
11071 }
11072 break;
11073 }
11074 /* BLX/BX */
ebfe27c5
PM
11075 tmp = load_reg(s, rm);
11076 if (link) {
a0415916 11077 val = (uint32_t)s->base.pc_next | 1;
7d1b0095 11078 tmp2 = tcg_temp_new_i32();
b0109805
PB
11079 tcg_gen_movi_i32(tmp2, val);
11080 store_reg(s, 14, tmp2);
3bb8a96f
PM
11081 gen_bx(s, tmp);
11082 } else {
11083 /* Only BX works as exception-return, not BLX */
11084 gen_bx_excret(s, tmp);
99c475ab 11085 }
99c475ab
FB
11086 break;
11087 }
ebfe27c5 11088 }
99c475ab
FB
11089 break;
11090 }
11091
a2d12f0f
PM
11092 /*
11093 * 0b0100_00xx_xxxx_xxxx
11094 * - Data-processing (two low registers)
11095 */
99c475ab
FB
11096 rd = insn & 7;
11097 rm = (insn >> 3) & 7;
11098 op = (insn >> 6) & 0xf;
11099 if (op == 2 || op == 3 || op == 4 || op == 7) {
11100 /* the shift/rotate ops want the operands backwards */
11101 val = rm;
11102 rm = rd;
11103 rd = val;
11104 val = 1;
11105 } else {
11106 val = 0;
11107 }
11108
396e467c 11109 if (op == 9) { /* neg */
7d1b0095 11110 tmp = tcg_temp_new_i32();
396e467c
FN
11111 tcg_gen_movi_i32(tmp, 0);
11112 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11113 tmp = load_reg(s, rd);
11114 } else {
f764718d 11115 tmp = NULL;
396e467c 11116 }
99c475ab 11117
396e467c 11118 tmp2 = load_reg(s, rm);
5899f386 11119 switch (op) {
99c475ab 11120 case 0x0: /* and */
396e467c 11121 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11122 if (!s->condexec_mask)
396e467c 11123 gen_logic_CC(tmp);
99c475ab
FB
11124 break;
11125 case 0x1: /* eor */
396e467c 11126 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11127 if (!s->condexec_mask)
396e467c 11128 gen_logic_CC(tmp);
99c475ab
FB
11129 break;
11130 case 0x2: /* lsl */
9ee6e8bb 11131 if (s->condexec_mask) {
365af80e 11132 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11133 } else {
9ef39277 11134 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11135 gen_logic_CC(tmp2);
9ee6e8bb 11136 }
99c475ab
FB
11137 break;
11138 case 0x3: /* lsr */
9ee6e8bb 11139 if (s->condexec_mask) {
365af80e 11140 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11141 } else {
9ef39277 11142 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11143 gen_logic_CC(tmp2);
9ee6e8bb 11144 }
99c475ab
FB
11145 break;
11146 case 0x4: /* asr */
9ee6e8bb 11147 if (s->condexec_mask) {
365af80e 11148 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11149 } else {
9ef39277 11150 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11151 gen_logic_CC(tmp2);
9ee6e8bb 11152 }
99c475ab
FB
11153 break;
11154 case 0x5: /* adc */
49b4c31e 11155 if (s->condexec_mask) {
396e467c 11156 gen_adc(tmp, tmp2);
49b4c31e
RH
11157 } else {
11158 gen_adc_CC(tmp, tmp, tmp2);
11159 }
99c475ab
FB
11160 break;
11161 case 0x6: /* sbc */
2de68a49 11162 if (s->condexec_mask) {
396e467c 11163 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11164 } else {
11165 gen_sbc_CC(tmp, tmp, tmp2);
11166 }
99c475ab
FB
11167 break;
11168 case 0x7: /* ror */
9ee6e8bb 11169 if (s->condexec_mask) {
f669df27
AJ
11170 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11171 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11172 } else {
9ef39277 11173 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11174 gen_logic_CC(tmp2);
9ee6e8bb 11175 }
99c475ab
FB
11176 break;
11177 case 0x8: /* tst */
396e467c
FN
11178 tcg_gen_and_i32(tmp, tmp, tmp2);
11179 gen_logic_CC(tmp);
99c475ab 11180 rd = 16;
5899f386 11181 break;
99c475ab 11182 case 0x9: /* neg */
9ee6e8bb 11183 if (s->condexec_mask)
396e467c 11184 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11185 else
72485ec4 11186 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11187 break;
11188 case 0xa: /* cmp */
72485ec4 11189 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11190 rd = 16;
11191 break;
11192 case 0xb: /* cmn */
72485ec4 11193 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11194 rd = 16;
11195 break;
11196 case 0xc: /* orr */
396e467c 11197 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11198 if (!s->condexec_mask)
396e467c 11199 gen_logic_CC(tmp);
99c475ab
FB
11200 break;
11201 case 0xd: /* mul */
7b2919a0 11202 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11203 if (!s->condexec_mask)
396e467c 11204 gen_logic_CC(tmp);
99c475ab
FB
11205 break;
11206 case 0xe: /* bic */
f669df27 11207 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11208 if (!s->condexec_mask)
396e467c 11209 gen_logic_CC(tmp);
99c475ab
FB
11210 break;
11211 case 0xf: /* mvn */
396e467c 11212 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11213 if (!s->condexec_mask)
396e467c 11214 gen_logic_CC(tmp2);
99c475ab 11215 val = 1;
5899f386 11216 rm = rd;
99c475ab
FB
11217 break;
11218 }
11219 if (rd != 16) {
396e467c
FN
11220 if (val) {
11221 store_reg(s, rm, tmp2);
11222 if (op != 0xf)
7d1b0095 11223 tcg_temp_free_i32(tmp);
396e467c
FN
11224 } else {
11225 store_reg(s, rd, tmp);
7d1b0095 11226 tcg_temp_free_i32(tmp2);
396e467c
FN
11227 }
11228 } else {
7d1b0095
PM
11229 tcg_temp_free_i32(tmp);
11230 tcg_temp_free_i32(tmp2);
99c475ab
FB
11231 }
11232 break;
11233
11234 case 5:
11235 /* load/store register offset. */
11236 rd = insn & 7;
11237 rn = (insn >> 3) & 7;
11238 rm = (insn >> 6) & 7;
11239 op = (insn >> 9) & 7;
b0109805 11240 addr = load_reg(s, rn);
b26eefb6 11241 tmp = load_reg(s, rm);
b0109805 11242 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11243 tcg_temp_free_i32(tmp);
99c475ab 11244
c40c8556 11245 if (op < 3) { /* store */
b0109805 11246 tmp = load_reg(s, rd);
c40c8556
PM
11247 } else {
11248 tmp = tcg_temp_new_i32();
11249 }
99c475ab
FB
11250
11251 switch (op) {
11252 case 0: /* str */
9bb6558a 11253 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11254 break;
11255 case 1: /* strh */
9bb6558a 11256 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11257 break;
11258 case 2: /* strb */
9bb6558a 11259 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11260 break;
11261 case 3: /* ldrsb */
9bb6558a 11262 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11263 break;
11264 case 4: /* ldr */
9bb6558a 11265 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11266 break;
11267 case 5: /* ldrh */
9bb6558a 11268 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11269 break;
11270 case 6: /* ldrb */
9bb6558a 11271 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11272 break;
11273 case 7: /* ldrsh */
9bb6558a 11274 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11275 break;
11276 }
c40c8556 11277 if (op >= 3) { /* load */
b0109805 11278 store_reg(s, rd, tmp);
c40c8556
PM
11279 } else {
11280 tcg_temp_free_i32(tmp);
11281 }
7d1b0095 11282 tcg_temp_free_i32(addr);
99c475ab
FB
11283 break;
11284
11285 case 6:
11286 /* load/store word immediate offset */
11287 rd = insn & 7;
11288 rn = (insn >> 3) & 7;
b0109805 11289 addr = load_reg(s, rn);
99c475ab 11290 val = (insn >> 4) & 0x7c;
b0109805 11291 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11292
11293 if (insn & (1 << 11)) {
11294 /* load */
c40c8556 11295 tmp = tcg_temp_new_i32();
12dcc321 11296 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11297 store_reg(s, rd, tmp);
99c475ab
FB
11298 } else {
11299 /* store */
b0109805 11300 tmp = load_reg(s, rd);
12dcc321 11301 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11302 tcg_temp_free_i32(tmp);
99c475ab 11303 }
7d1b0095 11304 tcg_temp_free_i32(addr);
99c475ab
FB
11305 break;
11306
11307 case 7:
11308 /* load/store byte immediate offset */
11309 rd = insn & 7;
11310 rn = (insn >> 3) & 7;
b0109805 11311 addr = load_reg(s, rn);
99c475ab 11312 val = (insn >> 6) & 0x1f;
b0109805 11313 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11314
11315 if (insn & (1 << 11)) {
11316 /* load */
c40c8556 11317 tmp = tcg_temp_new_i32();
9bb6558a 11318 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11319 store_reg(s, rd, tmp);
99c475ab
FB
11320 } else {
11321 /* store */
b0109805 11322 tmp = load_reg(s, rd);
9bb6558a 11323 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11324 tcg_temp_free_i32(tmp);
99c475ab 11325 }
7d1b0095 11326 tcg_temp_free_i32(addr);
99c475ab
FB
11327 break;
11328
11329 case 8:
11330 /* load/store halfword immediate offset */
11331 rd = insn & 7;
11332 rn = (insn >> 3) & 7;
b0109805 11333 addr = load_reg(s, rn);
99c475ab 11334 val = (insn >> 5) & 0x3e;
b0109805 11335 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11336
11337 if (insn & (1 << 11)) {
11338 /* load */
c40c8556 11339 tmp = tcg_temp_new_i32();
9bb6558a 11340 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11341 store_reg(s, rd, tmp);
99c475ab
FB
11342 } else {
11343 /* store */
b0109805 11344 tmp = load_reg(s, rd);
9bb6558a 11345 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11346 tcg_temp_free_i32(tmp);
99c475ab 11347 }
7d1b0095 11348 tcg_temp_free_i32(addr);
99c475ab
FB
11349 break;
11350
11351 case 9:
11352 /* load/store from stack */
11353 rd = (insn >> 8) & 7;
b0109805 11354 addr = load_reg(s, 13);
99c475ab 11355 val = (insn & 0xff) * 4;
b0109805 11356 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11357
11358 if (insn & (1 << 11)) {
11359 /* load */
c40c8556 11360 tmp = tcg_temp_new_i32();
9bb6558a 11361 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11362 store_reg(s, rd, tmp);
99c475ab
FB
11363 } else {
11364 /* store */
b0109805 11365 tmp = load_reg(s, rd);
9bb6558a 11366 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11367 tcg_temp_free_i32(tmp);
99c475ab 11368 }
7d1b0095 11369 tcg_temp_free_i32(addr);
99c475ab
FB
11370 break;
11371
11372 case 10:
55203189
PM
11373 /*
11374 * 0b1010_xxxx_xxxx_xxxx
11375 * - Add PC/SP (immediate)
11376 */
99c475ab 11377 rd = (insn >> 8) & 7;
99c475ab 11378 val = (insn & 0xff) * 4;
16e0d823 11379 tmp = add_reg_for_lit(s, insn & (1 << 11) ? 13 : 15, val);
5e3f878a 11380 store_reg(s, rd, tmp);
99c475ab
FB
11381 break;
11382
11383 case 11:
11384 /* misc */
11385 op = (insn >> 8) & 0xf;
11386 switch (op) {
11387 case 0:
55203189
PM
11388 /*
11389 * 0b1011_0000_xxxx_xxxx
11390 * - ADD (SP plus immediate)
11391 * - SUB (SP minus immediate)
11392 */
b26eefb6 11393 tmp = load_reg(s, 13);
99c475ab
FB
11394 val = (insn & 0x7f) * 4;
11395 if (insn & (1 << 7))
6a0d8a1d 11396 val = -(int32_t)val;
b26eefb6 11397 tcg_gen_addi_i32(tmp, tmp, val);
55203189 11398 store_sp_checked(s, tmp);
99c475ab
FB
11399 break;
11400
9ee6e8bb
PB
11401 case 2: /* sign/zero extend. */
11402 ARCH(6);
11403 rd = insn & 7;
11404 rm = (insn >> 3) & 7;
b0109805 11405 tmp = load_reg(s, rm);
9ee6e8bb 11406 switch ((insn >> 6) & 3) {
b0109805
PB
11407 case 0: gen_sxth(tmp); break;
11408 case 1: gen_sxtb(tmp); break;
11409 case 2: gen_uxth(tmp); break;
11410 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11411 }
b0109805 11412 store_reg(s, rd, tmp);
9ee6e8bb 11413 break;
99c475ab 11414 case 4: case 5: case 0xc: case 0xd:
aa369e5c
PM
11415 /*
11416 * 0b1011_x10x_xxxx_xxxx
11417 * - push/pop
11418 */
b0109805 11419 addr = load_reg(s, 13);
5899f386
FB
11420 if (insn & (1 << 8))
11421 offset = 4;
99c475ab 11422 else
5899f386
FB
11423 offset = 0;
11424 for (i = 0; i < 8; i++) {
11425 if (insn & (1 << i))
11426 offset += 4;
11427 }
11428 if ((insn & (1 << 11)) == 0) {
b0109805 11429 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11430 }
aa369e5c
PM
11431
11432 if (s->v8m_stackcheck) {
11433 /*
11434 * Here 'addr' is the lower of "old SP" and "new SP";
11435 * if this is a pop that starts below the limit and ends
11436 * above it, it is UNKNOWN whether the limit check triggers;
11437 * we choose to trigger.
11438 */
11439 gen_helper_v8m_stackcheck(cpu_env, addr);
11440 }
11441
99c475ab
FB
11442 for (i = 0; i < 8; i++) {
11443 if (insn & (1 << i)) {
11444 if (insn & (1 << 11)) {
11445 /* pop */
c40c8556 11446 tmp = tcg_temp_new_i32();
12dcc321 11447 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11448 store_reg(s, i, tmp);
99c475ab
FB
11449 } else {
11450 /* push */
b0109805 11451 tmp = load_reg(s, i);
12dcc321 11452 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11453 tcg_temp_free_i32(tmp);
99c475ab 11454 }
5899f386 11455 /* advance to the next address. */
b0109805 11456 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11457 }
11458 }
f764718d 11459 tmp = NULL;
99c475ab
FB
11460 if (insn & (1 << 8)) {
11461 if (insn & (1 << 11)) {
11462 /* pop pc */
c40c8556 11463 tmp = tcg_temp_new_i32();
12dcc321 11464 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11465 /* don't set the pc until the rest of the instruction
11466 has completed */
11467 } else {
11468 /* push lr */
b0109805 11469 tmp = load_reg(s, 14);
12dcc321 11470 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11471 tcg_temp_free_i32(tmp);
99c475ab 11472 }
b0109805 11473 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11474 }
5899f386 11475 if ((insn & (1 << 11)) == 0) {
b0109805 11476 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11477 }
99c475ab 11478 /* write back the new stack pointer */
b0109805 11479 store_reg(s, 13, addr);
99c475ab 11480 /* set the new PC value */
be5e7a76 11481 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11482 store_reg_from_load(s, 15, tmp);
be5e7a76 11483 }
99c475ab
FB
11484 break;
11485
9ee6e8bb
PB
11486 case 1: case 3: case 9: case 11: /* czb */
11487 rm = insn & 7;
d9ba4830 11488 tmp = load_reg(s, rm);
c2d9644e 11489 arm_gen_condlabel(s);
9ee6e8bb 11490 if (insn & (1 << 11))
cb63669a 11491 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11492 else
cb63669a 11493 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11494 tcg_temp_free_i32(tmp);
9ee6e8bb 11495 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
fdbcf632 11496 gen_jmp(s, read_pc(s) + offset);
9ee6e8bb
PB
11497 break;
11498
11499 case 15: /* IT, nop-hint. */
11500 if ((insn & 0xf) == 0) {
11501 gen_nop_hint(s, (insn >> 4) & 0xf);
11502 break;
11503 }
5529de1e
PM
11504 /*
11505 * IT (If-Then)
11506 *
11507 * Combinations of firstcond and mask which set up an 0b1111
11508 * condition are UNPREDICTABLE; we take the CONSTRAINED
11509 * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
11510 * i.e. both meaning "execute always".
11511 */
9ee6e8bb
PB
11512 s->condexec_cond = (insn >> 4) & 0xe;
11513 s->condexec_mask = insn & 0x1f;
11514 /* No actual code generated for this insn, just setup state. */
11515 break;
11516
06c949e6 11517 case 0xe: /* bkpt */
d4a2dc67
PM
11518 {
11519 int imm8 = extract32(insn, 0, 8);
be5e7a76 11520 ARCH(5);
06bcbda3 11521 gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm8, true));
06c949e6 11522 break;
d4a2dc67 11523 }
06c949e6 11524
19a6e31c
PM
11525 case 0xa: /* rev, and hlt */
11526 {
11527 int op1 = extract32(insn, 6, 2);
11528
11529 if (op1 == 2) {
11530 /* HLT */
11531 int imm6 = extract32(insn, 0, 6);
11532
11533 gen_hlt(s, imm6);
11534 break;
11535 }
11536
11537 /* Otherwise this is rev */
9ee6e8bb
PB
11538 ARCH(6);
11539 rn = (insn >> 3) & 0x7;
11540 rd = insn & 0x7;
b0109805 11541 tmp = load_reg(s, rn);
19a6e31c 11542 switch (op1) {
66896cb8 11543 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11544 case 1: gen_rev16(tmp); break;
11545 case 3: gen_revsh(tmp); break;
19a6e31c
PM
11546 default:
11547 g_assert_not_reached();
9ee6e8bb 11548 }
b0109805 11549 store_reg(s, rd, tmp);
9ee6e8bb 11550 break;
19a6e31c 11551 }
9ee6e8bb 11552
d9e028c1
PM
11553 case 6:
11554 switch ((insn >> 5) & 7) {
11555 case 2:
11556 /* setend */
11557 ARCH(6);
9886ecdf
PB
11558 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11559 gen_helper_setend(cpu_env);
dcba3a8d 11560 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 11561 }
9ee6e8bb 11562 break;
d9e028c1
PM
11563 case 3:
11564 /* cps */
11565 ARCH(6);
11566 if (IS_USER(s)) {
11567 break;
8984bd2e 11568 }
b53d8923 11569 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11570 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11571 /* FAULTMASK */
11572 if (insn & 1) {
11573 addr = tcg_const_i32(19);
11574 gen_helper_v7m_msr(cpu_env, addr, tmp);
11575 tcg_temp_free_i32(addr);
11576 }
11577 /* PRIMASK */
11578 if (insn & 2) {
11579 addr = tcg_const_i32(16);
11580 gen_helper_v7m_msr(cpu_env, addr, tmp);
11581 tcg_temp_free_i32(addr);
11582 }
11583 tcg_temp_free_i32(tmp);
11584 gen_lookup_tb(s);
11585 } else {
11586 if (insn & (1 << 4)) {
11587 shift = CPSR_A | CPSR_I | CPSR_F;
11588 } else {
11589 shift = 0;
11590 }
11591 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11592 }
d9e028c1
PM
11593 break;
11594 default:
11595 goto undef;
9ee6e8bb
PB
11596 }
11597 break;
11598
99c475ab
FB
11599 default:
11600 goto undef;
11601 }
11602 break;
11603
11604 case 12:
a7d3970d 11605 {
99c475ab 11606 /* load/store multiple */
f764718d 11607 TCGv_i32 loaded_var = NULL;
99c475ab 11608 rn = (insn >> 8) & 0x7;
b0109805 11609 addr = load_reg(s, rn);
99c475ab
FB
11610 for (i = 0; i < 8; i++) {
11611 if (insn & (1 << i)) {
99c475ab
FB
11612 if (insn & (1 << 11)) {
11613 /* load */
c40c8556 11614 tmp = tcg_temp_new_i32();
12dcc321 11615 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11616 if (i == rn) {
11617 loaded_var = tmp;
11618 } else {
11619 store_reg(s, i, tmp);
11620 }
99c475ab
FB
11621 } else {
11622 /* store */
b0109805 11623 tmp = load_reg(s, i);
12dcc321 11624 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11625 tcg_temp_free_i32(tmp);
99c475ab 11626 }
5899f386 11627 /* advance to the next address */
b0109805 11628 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11629 }
11630 }
b0109805 11631 if ((insn & (1 << rn)) == 0) {
a7d3970d 11632 /* base reg not in list: base register writeback */
b0109805
PB
11633 store_reg(s, rn, addr);
11634 } else {
a7d3970d
PM
11635 /* base reg in list: if load, complete it now */
11636 if (insn & (1 << 11)) {
11637 store_reg(s, rn, loaded_var);
11638 }
7d1b0095 11639 tcg_temp_free_i32(addr);
b0109805 11640 }
99c475ab 11641 break;
a7d3970d 11642 }
99c475ab
FB
11643 case 13:
11644 /* conditional branch or swi */
11645 cond = (insn >> 8) & 0xf;
11646 if (cond == 0xe)
11647 goto undef;
11648
11649 if (cond == 0xf) {
11650 /* swi */
a0415916 11651 gen_set_pc_im(s, s->base.pc_next);
d4a2dc67 11652 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 11653 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
11654 break;
11655 }
11656 /* generate a conditional jump to next instruction */
c2d9644e 11657 arm_skip_unless(s, cond);
99c475ab
FB
11658
11659 /* jump to the offset */
fdbcf632 11660 val = read_pc(s);
99c475ab 11661 offset = ((int32_t)insn << 24) >> 24;
5899f386 11662 val += offset << 1;
8aaca4c0 11663 gen_jmp(s, val);
99c475ab
FB
11664 break;
11665
11666 case 14:
358bf29e 11667 if (insn & (1 << 11)) {
296e5a0a
PM
11668 /* thumb_insn_is_16bit() ensures we can't get here for
11669 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
11670 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
11671 */
11672 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11673 ARCH(5);
11674 offset = ((insn & 0x7ff) << 1);
11675 tmp = load_reg(s, 14);
11676 tcg_gen_addi_i32(tmp, tmp, offset);
11677 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
11678
11679 tmp2 = tcg_temp_new_i32();
a0415916 11680 tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
296e5a0a
PM
11681 store_reg(s, 14, tmp2);
11682 gen_bx(s, tmp);
358bf29e
PB
11683 break;
11684 }
9ee6e8bb 11685 /* unconditional branch */
fdbcf632 11686 val = read_pc(s);
99c475ab 11687 offset = ((int32_t)insn << 21) >> 21;
fdbcf632 11688 val += offset << 1;
8aaca4c0 11689 gen_jmp(s, val);
99c475ab
FB
11690 break;
11691
11692 case 15:
296e5a0a
PM
11693 /* thumb_insn_is_16bit() ensures we can't get here for
11694 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
11695 */
11696 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11697
11698 if (insn & (1 << 11)) {
11699 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
11700 offset = ((insn & 0x7ff) << 1) | 1;
11701 tmp = load_reg(s, 14);
11702 tcg_gen_addi_i32(tmp, tmp, offset);
11703
11704 tmp2 = tcg_temp_new_i32();
a0415916 11705 tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
296e5a0a
PM
11706 store_reg(s, 14, tmp2);
11707 gen_bx(s, tmp);
11708 } else {
11709 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
11710 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
11711
fdbcf632 11712 tcg_gen_movi_i32(cpu_R[14], read_pc(s) + uoffset);
296e5a0a 11713 }
9ee6e8bb 11714 break;
99c475ab
FB
11715 }
11716 return;
9ee6e8bb 11717illegal_op:
99c475ab 11718undef:
1ce21ba1 11719 unallocated_encoding(s);
99c475ab
FB
11720}
11721
541ebcd4
PM
11722static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11723{
a0415916 11724 /* Return true if the insn at dc->base.pc_next might cross a page boundary.
541ebcd4 11725 * (False positives are OK, false negatives are not.)
5b8d7289 11726 * We know this is a Thumb insn, and our caller ensures we are
a0415916 11727 * only called if dc->base.pc_next is less than 4 bytes from the page
5b8d7289
PM
11728 * boundary, so we cross the page if the first 16 bits indicate
11729 * that this is a 32 bit insn.
541ebcd4 11730 */
a0415916 11731 uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
541ebcd4 11732
a0415916 11733 return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
541ebcd4
PM
11734}
11735
b542683d 11736static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2c0262af 11737{
1d8a5535 11738 DisasContext *dc = container_of(dcbase, DisasContext, base);
9c489ea6 11739 CPUARMState *env = cs->env_ptr;
2fc0cc0e 11740 ARMCPU *cpu = env_archcpu(env);
aad821ac
RH
11741 uint32_t tb_flags = dc->base.tb->flags;
11742 uint32_t condexec, core_mmu_idx;
3b46e624 11743
962fcbf2 11744 dc->isar = &cpu->isar;
e50e6a20 11745 dc->condjmp = 0;
3926cc84 11746
40f860cd 11747 dc->aarch64 = 0;
cef9ee70
SS
11748 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11749 * there is no secure EL1, so we route exceptions to EL3.
11750 */
11751 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11752 !arm_el_is_aa64(env, 3);
aad821ac
RH
11753 dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
11754 dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
11755 dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
11756 condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
11757 dc->condexec_mask = (condexec & 0xf) << 1;
11758 dc->condexec_cond = condexec >> 4;
11759 core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
11760 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
c1e37810 11761 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 11762#if !defined(CONFIG_USER_ONLY)
c1e37810 11763 dc->user = (dc->current_el == 0);
3926cc84 11764#endif
aad821ac
RH
11765 dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
11766 dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
11767 dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
11768 dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
ea7ac69d
PM
11769 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
11770 dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
11771 dc->vec_stride = 0;
11772 } else {
11773 dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
11774 dc->c15_cpar = 0;
11775 }
aad821ac 11776 dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
fb602cb7
PM
11777 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
11778 regime_is_secure(env, dc->mmu_idx);
aad821ac 11779 dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
6d60c67a 11780 dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
6000531e
PM
11781 dc->v7m_new_fp_ctxt_needed =
11782 FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
e33cf0f8 11783 dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
60322b39 11784 dc->cp_regs = cpu->cp_regs;
a984e42c 11785 dc->features = env->features;
40f860cd 11786
50225ad0
PM
11787 /* Single step state. The code-generation logic here is:
11788 * SS_ACTIVE == 0:
11789 * generate code with no special handling for single-stepping (except
11790 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11791 * this happens anyway because those changes are all system register or
11792 * PSTATE writes).
11793 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11794 * emit code for one insn
11795 * emit code to clear PSTATE.SS
11796 * emit code to generate software step exception for completed step
11797 * end TB (as usual for having generated an exception)
11798 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11799 * emit code to generate a software step exception
11800 * end the TB
11801 */
aad821ac
RH
11802 dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
11803 dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
50225ad0 11804 dc->is_ldex = false;
8bd587c1
PM
11805 if (!arm_feature(env, ARM_FEATURE_M)) {
11806 dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
11807 }
50225ad0 11808
bfe7ad5b 11809 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
1d8a5535 11810
f7708456
RH
11811 /* If architectural single step active, limit to 1. */
11812 if (is_singlestepping(dc)) {
b542683d 11813 dc->base.max_insns = 1;
f7708456
RH
11814 }
11815
d0264d86
RH
11816 /* ARM is a fixed-length ISA. Bound the number of insns to execute
11817 to those left on the page. */
11818 if (!dc->thumb) {
bfe7ad5b 11819 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
b542683d 11820 dc->base.max_insns = MIN(dc->base.max_insns, bound);
d0264d86
RH
11821 }
11822
d9eea52c
PM
11823 cpu_V0 = tcg_temp_new_i64();
11824 cpu_V1 = tcg_temp_new_i64();
e677137d 11825 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 11826 cpu_M0 = tcg_temp_new_i64();
1d8a5535
LV
11827}
11828
b1476854
LV
11829static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
11830{
11831 DisasContext *dc = container_of(dcbase, DisasContext, base);
11832
11833 /* A note on handling of the condexec (IT) bits:
11834 *
11835 * We want to avoid the overhead of having to write the updated condexec
11836 * bits back to the CPUARMState for every instruction in an IT block. So:
11837 * (1) if the condexec bits are not already zero then we write
11838 * zero back into the CPUARMState now. This avoids complications trying
11839 * to do it at the end of the block. (For example if we don't do this
11840 * it's hard to identify whether we can safely skip writing condexec
11841 * at the end of the TB, which we definitely want to do for the case
11842 * where a TB doesn't do anything with the IT state at all.)
11843 * (2) if we are going to leave the TB then we call gen_set_condexec()
11844 * which will write the correct value into CPUARMState if zero is wrong.
11845 * This is done both for leaving the TB at the end, and for leaving
11846 * it because of an exception we know will happen, which is done in
11847 * gen_exception_insn(). The latter is necessary because we need to
11848 * leave the TB with the PC/IT state just prior to execution of the
11849 * instruction which caused the exception.
11850 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11851 * then the CPUARMState will be wrong and we need to reset it.
11852 * This is handled in the same way as restoration of the
11853 * PC in these situations; we save the value of the condexec bits
11854 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11855 * then uses this to restore them after an exception.
11856 *
11857 * Note that there are no instructions which can read the condexec
11858 * bits, and none which can write non-static values to them, so
11859 * we don't need to care about whether CPUARMState is correct in the
11860 * middle of a TB.
11861 */
11862
11863 /* Reset the conditional execution bits immediately. This avoids
11864 complications trying to do it at the end of the block. */
11865 if (dc->condexec_mask || dc->condexec_cond) {
11866 TCGv_i32 tmp = tcg_temp_new_i32();
11867 tcg_gen_movi_i32(tmp, 0);
11868 store_cpu_field(tmp, condexec_bits);
11869 }
11870}
11871
f62bd897
LV
11872static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
11873{
11874 DisasContext *dc = container_of(dcbase, DisasContext, base);
11875
a0415916 11876 tcg_gen_insn_start(dc->base.pc_next,
f62bd897
LV
11877 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
11878 0);
15fa08f8 11879 dc->insn_start = tcg_last_op();
f62bd897
LV
11880}
11881
a68956ad
LV
/*
 * Handle a guest breakpoint hit at the current PC.  Always returns
 * true: the breakpoint is considered handled and the TB ends here.
 */
static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        /* Architectural (CPU) breakpoint: emit code that re-evaluates
         * the breakpoint conditions at execution time.
         */
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->base.pc_next);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        /* Non-CPU breakpoint (e.g. set via the gdbstub): raise a debug
         * exception unconditionally.
         */
        gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->base.pc_next += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
11908
722ef0a5 11909static bool arm_pre_translate_insn(DisasContext *dc)
13189a90 11910{
13189a90
LV
11911#ifdef CONFIG_USER_ONLY
11912 /* Intercept jump to the magic kernel page. */
a0415916 11913 if (dc->base.pc_next >= 0xffff0000) {
13189a90
LV
11914 /* We always get here via a jump, so know we are not in a
11915 conditional execution block. */
11916 gen_exception_internal(EXCP_KERNEL_TRAP);
11917 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 11918 return true;
13189a90
LV
11919 }
11920#endif
11921
11922 if (dc->ss_active && !dc->pstate_ss) {
11923 /* Singlestep state is Active-pending.
11924 * If we're in this state at the start of a TB then either
11925 * a) we just took an exception to an EL which is being debugged
11926 * and this is the first insn in the exception handler
11927 * b) debug exceptions were masked and we just unmasked them
11928 * without changing EL (eg by clearing PSTATE.D)
11929 * In either case we're going to take a swstep exception in the
11930 * "did not step an insn" case, and so the syndrome ISV and EX
11931 * bits should be zero.
11932 */
11933 assert(dc->base.num_insns == 1);
c1d5f50f 11934 gen_swstep_exception(dc, 0, 0);
13189a90 11935 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 11936 return true;
13189a90
LV
11937 }
11938
722ef0a5
RH
11939 return false;
11940}
13189a90 11941
d0264d86 11942static void arm_post_translate_insn(DisasContext *dc)
722ef0a5 11943{
13189a90
LV
11944 if (dc->condjmp && !dc->base.is_jmp) {
11945 gen_set_label(dc->condlabel);
11946 dc->condjmp = 0;
11947 }
23169224 11948 translator_loop_temp_check(&dc->base);
13189a90
LV
11949}
11950
722ef0a5
RH
/*
 * Translate one A32 (Arm, non-Thumb) instruction: fetch 4 bytes at
 * base.pc_next, advance the PC, and decode via disas_arm_insn().
 */
static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    /* Record this insn's own address before pc_next is advanced */
    dc->pc_curr = dc->base.pc_next;
    insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
    dc->insn = insn;
    dc->base.pc_next += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA.  We performed the cross-page check
       in init_disas_context by adjusting max_insns.  */
}
11972
dcf14dfb
PM
11973static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
11974{
11975 /* Return true if this Thumb insn is always unconditional,
11976 * even inside an IT block. This is true of only a very few
11977 * instructions: BKPT, HLT, and SG.
11978 *
11979 * A larger class of instructions are UNPREDICTABLE if used
11980 * inside an IT block; we do not need to detect those here, because
11981 * what we do by default (perform the cc check and update the IT
11982 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
11983 * choice for those situations.
11984 *
11985 * insn is either a 16-bit or a 32-bit instruction; the two are
11986 * distinguishable because for the 16-bit case the top 16 bits
11987 * are zeroes, and that isn't a valid 32-bit encoding.
11988 */
11989 if ((insn & 0xffffff00) == 0xbe00) {
11990 /* BKPT */
11991 return true;
11992 }
11993
11994 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
11995 !arm_dc_feature(s, ARM_FEATURE_M)) {
11996 /* HLT: v8A only. This is unconditional even when it is going to
11997 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
11998 * For v7 cores this was a plain old undefined encoding and so
11999 * honours its cc check. (We might be using the encoding as
12000 * a semihosting trap, but we don't change the cc check behaviour
12001 * on that account, because a debugger connected to a real v7A
12002 * core and emulating semihosting traps by catching the UNDEF
12003 * exception would also only see cases where the cc check passed.
12004 * No guest code should be trying to do a HLT semihosting trap
12005 * in an IT block anyway.
12006 */
12007 return true;
12008 }
12009
12010 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12011 arm_dc_feature(s, ARM_FEATURE_M)) {
12012 /* SG: v8M only */
12013 return true;
12014 }
12015
12016 return false;
12017}
12018
722ef0a5
RH
/*
 * Translate a single T32 (Thumb) instruction: fetch one or two
 * halfwords, emit the IT-block conditional skip if needed, decode,
 * then advance the IT-state machine.  Also stops the TB early when
 * the next insn would start in (or span into) a new page, so that
 * prefetch aborts are raised at the correct insn boundary.
 */
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    /* Record the insn's own address before pc_next advances past it. */
    dc->pc_curr = dc->base.pc_next;
    insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
    dc->base.pc_next += 2;
    if (!is_16bit) {
        /* 32-bit encoding: fetch the second halfword and combine.  The
         * first halfword becomes the high 16 bits of dc->insn.
         */
        uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->base.pc_next += 2;
    }
    dc->insn = insn;

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        /*
         * Conditionally skip the insn. Note that both 0xe and 0xf mean
         * "always"; 0xf is not "never".
         */
        if (cond < 0x0e) {
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition: shift the next bit of the
     * IT mask into the low bit of the condition, and clear the
     * condition entirely once the mask is exhausted.
     */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next.  This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
12092
/*
 * Emit the end-of-TB code.  Depending on why translation stopped
 * (dc->base.is_jmp) this generates the goto_tb/exit sequence, a
 * pending exception (SWI/HVC/SMC), a wait/yield helper call, or the
 * single-step exception, and finally the "condition failed" codepath
 * for a conditional branch/trap insn that ended the TB.
 */
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            /* Sync the PC, then fall through to raise the step exception. */
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            /* NOTE: DISAS_NORETURN deliberately sits after "default" so
             * that it is excluded from the default handling above.
             */
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch(dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            /* Argument is the insn length: 2 for a 16-bit Thumb insn
             * (whose top halfword is zero as stored in dc->insn), else 4.
             */
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->base.pc_next);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->base.pc_next);
        }
    }
}
12212
4013f7fc
LV
12213static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12214{
12215 DisasContext *dc = container_of(dcbase, DisasContext, base);
12216
12217 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
1d48474d 12218 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
4013f7fc
LV
12219}
12220
23169224
LV
/* Translator hooks for A32 (ARM) mode; shares everything with Thumb
 * except the per-insn translate hook.
 */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12230
722ef0a5
RH
/* Translator hooks for T32 (Thumb) mode; identical to the ARM ops
 * except for the per-insn translate hook.
 */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12240
70d3c035 12241/* generate intermediate code for basic block 'tb'. */
8b86d6d2 12242void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
70d3c035 12243{
23169224
LV
12244 DisasContext dc;
12245 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 12246
aad821ac 12247 if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
722ef0a5
RH
12248 ops = &thumb_translator_ops;
12249 }
23169224 12250#ifdef TARGET_AARCH64
aad821ac 12251 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
23169224 12252 ops = &aarch64_translator_ops;
2c0262af
FB
12253 }
12254#endif
23169224 12255
8b86d6d2 12256 translator_loop(ops, &dc.base, cpu, tb, max_insns);
2c0262af
FB
12257}
12258
bad729e2
RH
12259void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12260 target_ulong *data)
d2856f1a 12261{
3926cc84 12262 if (is_a64(env)) {
bad729e2 12263 env->pc = data[0];
40f860cd 12264 env->condexec_bits = 0;
aaa1f954 12265 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12266 } else {
bad729e2
RH
12267 env->regs[15] = data[0];
12268 env->condexec_bits = data[1];
aaa1f954 12269 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12270 }
d2856f1a 12271}