/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "arm_ldst.h"
#include "hw/semihosting/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}

static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}

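/* Load/store a 32-bit field of CPUARMState at a given byte offset:
 * the load returns a fresh temporary; the store frees its input.
 */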
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

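/* Dual signed 16x16->32 multiply: compute the products of the low
 * halves and of the high halves of a and b; the low-half product is
 * left in a and the high-half product in b.
 */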
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1.  Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1.  Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

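/* Variable shift by register, with ARM semantics: only the low byte
 * of t1 is used as the shift amount, and amounts of 32 or more
 * produce zero.
 */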
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

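/* ASR by register: amounts of 32 or more are clamped to 31, which
 * yields the architecturally correct all-sign-bits result.
 */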
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

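/* Set CF to bit 'shift' of var: the carry-out of an immediate shift.  */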
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}

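/* For each data-processing opcode: 1 if it sets N and Z directly from
 * the logical result (via gen_logic_CC), 0 if the flags come from the
 * arithmetic flag helpers.
 */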
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

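/* Write the current IT-block state from the DisasContext back into
 * env->condexec_bits, so it is correct if an exception is taken.
 */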
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}

static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

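/* Typed load/store wrappers, each with an _iss variant that also
 * records the syndrome information for a possible data abort.
 */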
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,     \
                                     TCGv_i32 a32, int index)           \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,             \
                                           TCGv_i32 val,                \
                                           TCGv_i32 a32, int index,     \
                                           ISSInfo issinfo)             \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,     \
                                     TCGv_i32 a32, int index)           \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,             \
                                           TCGv_i32 val,                \
                                           TCGv_i32 a32, int index,     \
                                           ISSInfo issinfo)             \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}

static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

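/* Apply the immediate or shifted-register offset encoded in an A32
 * load/store insn to the address in var, adding or subtracting it
 * according to the U bit (insn bit 23).
 */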
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

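/* Return a pointer to the float_status to use: the Neon "standard
 * FPSCR" status when neon is set, otherwise the normal VFP fp_status.
 */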
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

#define VFP_GEN_ITOF(name)                                        \
static inline void gen_vfp_##name(int dp, int neon)               \
{                                                                 \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                  \
    if (dp) {                                                     \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);    \
    } else {                                                      \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);    \
    }                                                             \
    tcg_temp_free_ptr(statusptr);                                 \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name)                                        \
static inline void gen_vfp_##name(int dp, int neon)               \
{                                                                 \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                  \
    if (dp) {                                                     \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);    \
    } else {                                                      \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);    \
    }                                                             \
    tcg_temp_free_ptr(statusptr);                                 \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round)                                  \
static inline void gen_vfp_##name(int dp, int shift, int neon)    \
{                                                                 \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                    \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                  \
    if (dp) {                                                     \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr);               \
    } else {                                                      \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr);               \
    }                                                             \
    tcg_temp_free_i32(tmp_shift);                                 \
    tcg_temp_free_ptr(statusptr);                                 \
}
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX

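/* Return the byte offset within CPUARMState of VFP register 'reg':
 * a D register when dp is true, otherwise an S register. The VFP
 * registers alias vfp.zregs, with CPU_DoubleU handling endianness.
 */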
static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}

static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, cpu_env, offset);
        break;
    case MO_Q:
        tcg_gen_ld_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, cpu_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static inline void neon_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

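/* Bit 20 of a coprocessor access insn: set for reads (coproc to core),
 * clear for writes.
 */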
#define ARM_CP_RW_BIT   (1 << 20)

/* Include the VFP decoder */
#include "translate-vfp.inc.c"

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

e677137d
PB
1779static void gen_op_iwmmxt_set_mup(void)
1780{
39d5492a 1781 TCGv_i32 tmp;
e677137d
PB
1782 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1783 tcg_gen_ori_i32(tmp, tmp, 2);
1784 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1785}
1786
1787static void gen_op_iwmmxt_set_cup(void)
1788{
39d5492a 1789 TCGv_i32 tmp;
e677137d
PB
1790 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1791 tcg_gen_ori_i32(tmp, tmp, 1);
1792 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1793}
1794
1795static void gen_op_iwmmxt_setpsr_nz(void)
1796{
39d5492a 1797 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1798 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1799 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1800}
1801
1802static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1803{
1804 iwmmxt_load_reg(cpu_V1, rn);
86831435 1805 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1806 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1807}
1808
39d5492a
PM
1809static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1810 TCGv_i32 dest)
18c9b560
AZ
1811{
1812 int rd;
1813 uint32_t offset;
39d5492a 1814 TCGv_i32 tmp;
18c9b560
AZ
1815
1816 rd = (insn >> 16) & 0xf;
da6b5335 1817 tmp = load_reg(s, rd);
18c9b560
AZ
1818
1819 offset = (insn & 0xff) << ((insn >> 7) & 2);
1820 if (insn & (1 << 24)) {
1821 /* Pre indexed */
1822 if (insn & (1 << 23))
da6b5335 1823 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1824 else
da6b5335
FN
1825 tcg_gen_addi_i32(tmp, tmp, -offset);
1826 tcg_gen_mov_i32(dest, tmp);
18c9b560 1827 if (insn & (1 << 21))
da6b5335
FN
1828 store_reg(s, rd, tmp);
1829 else
7d1b0095 1830 tcg_temp_free_i32(tmp);
18c9b560
AZ
1831 } else if (insn & (1 << 21)) {
1832 /* Post indexed */
da6b5335 1833 tcg_gen_mov_i32(dest, tmp);
18c9b560 1834 if (insn & (1 << 23))
da6b5335 1835 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1836 else
da6b5335
FN
1837 tcg_gen_addi_i32(tmp, tmp, -offset);
1838 store_reg(s, rd, tmp);
18c9b560
AZ
1839 } else if (!(insn & (1 << 23)))
1840 return 1;
1841 return 0;
1842}
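
/*
 * In gen_iwmmxt_address() above, the usual ARM addressing-mode bits
 * are decoded: bit 24 selects pre- vs post-indexing, bit 23 add vs
 * subtract, and bit 21 base-register writeback; the non-indexing form
 * (bits 24 and 21 both clear) is only accepted with bit 23 set.
 */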

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
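
/*
 * gen_iwmmxt_shift() fetches the shift amount for the shift/rotate ops
 * below either from one of the wCGR0-wCGR3 control registers (bit 8
 * set) or from the low 32 bits of a wRn register, masked to the range
 * the caller passes (0xff for the WSRL/WSRA/WSLL cases, narrower
 * per-element-size masks for WROR).
 */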

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                         /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                            /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                          /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {                     /* WLDRD */
                        gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else {                                    /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {                     /* WLDRH */
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                    } else {                                    /* WLDRB */
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                          /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {                     /* WSTRD */
                        gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
                    } else {                                    /* WSTRW wRd */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) {                     /* WSTRH */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                    } else {                                    /* WSTRB */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                                 /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                                 /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                                 /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                                 /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                                 /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                                 /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                                     /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:             /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:             /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:             /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:             /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:             /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:             /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:             /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:             /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:             /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            tmp2 = NULL;
            tmp3 = NULL;
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:             /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:             /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:             /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:             /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:             /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:             /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:             /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:             /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:             /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:             /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:             /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:             /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:             /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:             /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:             /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:             /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:             /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:             /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:             /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:             /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:             /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                               /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                               /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:                 /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}

/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                               /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                               /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                               /* MIABB */
        case 0xd:                                               /* MIABT */
        case 0xe:                                               /* MIATB */
        case 0xf:                                               /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                             /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                                /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
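
/*
 * Note: the XScale DSP accumulator is 40 bits wide, which is why MRA
 * masks the high word with (1 << (40 - 32)) - 1 above: rdhi receives
 * only accumulator bits [39:32].
 */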

#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
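
/*
 * Worked example: VFP_DREG_D() builds the Dd register number from
 * insn[15:12] with insn[22] as bit 4, so on a VFP3+ core insn[22] = 1
 * and insn[15:12] = 0b0010 name D18; pre-VFP3 cores have only D0-D15,
 * so a set insn[22] is UNDEF there. The single-precision VFP_SREG
 * variants fold the extra bit in at the bottom instead
 * (Sd = (insn[15:12] << 1) | insn[22]), since S registers pair up
 * within each D register.
 */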

static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
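
/*
 * Worked example: for var = 0x1234abcd, gen_neon_dup_low16() leaves
 * 0xabcdabcd and gen_neon_dup_high16() leaves 0x12341234 in var,
 * i.e. the selected 16-bit half replicated into both halves.
 */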

/*
 * Disassemble a VFP instruction.  Returns nonzero if an error occurred
 * (i.e. an undefined instruction).
 */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, delta_d, delta_m, bank_mask;
    int dp, veclen;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /*
     * If the decodetree decoder handles this insn it will always
     * emit code to either execute the insn or generate an appropriate
     * exception; so we don't need to ever return non-zero to tell
     * the calling code to emit an UNDEF exception.
     */
    if (extract32(insn, 28, 4) == 0xf) {
        if (disas_vfp_uncond(s, insn)) {
            return 0;
        }
    } else {
        if (disas_vfp(s, insn)) {
            return 0;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /*
         * Encodings with T=1 (Thumb) or unconditional (ARM): these
         * were all handled by the decodetree decoder, so any insn
         * patterns which get here must be UNDEF.
         */
        return 1;
    }

    /*
     * FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!vfp_access_check(s)) {
        return 0;
    }

    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* already handled by decodetree */
            return 1;
        } else {
            /* data processing */
            bool rd_is_dp = dp;
            bool rm_is_dp = dp;
            bool no_output = false;

            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            rn = VFP_SREG_N(insn);

            switch (op) {
            case 0 ... 14:
                /* Already handled by decodetree */
                return 1;
            case 15:
                switch (rn) {
                case 0 ... 23:
                case 28 ... 31:
                    /* Already handled by decodetree */
                    return 1;
                default:
                    break;
                }
            default:
                break;
            }

            if (op == 15) {
                /* rn is opcode, encoded as per VFP_SREG_N. */
                switch (rn) {
                case 0x18: /* vcvtr.u32.fxx */
                case 0x19: /* vcvtz.u32.fxx */
                case 0x1a: /* vcvtr.s32.fxx */
                case 0x1b: /* vcvtz.s32.fxx */
                    rd_is_dp = false;
                    break;

                default:
                    return 1;
                }
            } else if (dp) {
                /* rn is register number */
                VFP_DREG_N(rn, insn);
            }

            if (rd_is_dp) {
                VFP_DREG_D(rd, insn);
            } else {
                rd = VFP_SREG_D(insn);
            }
            if (rm_is_dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

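            /*
             * s->vec_len and s->vec_stride are cached copies of the
             * FPSCR LEN and STRIDE fields, so a nonzero veclen here
             * turns this single insn into a short-vector operation
             * over a bank of registers (the deprecated "VFP vector
             * mode"); the rn > 3 extension-space ops are treated as
             * scalar.
             */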
            veclen = s->vec_len;
            if (op == 15 && rn > 3) {
                veclen = 0;
            }

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(rm_is_dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 15: /* extension space */
                    switch (rn) {
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    default: /* undefined */
                        g_assert_not_reached();
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result, if any.  */
                if (!no_output) {
                    gen_mov_vreg_F0(rd_is_dp, rd);
                }

                /* break out of the loop if we have finished  */
                if (veclen == 0) {
                    break;
                }

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        /* Already handled by decodetree */
        return 1;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}

static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
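
/*
 * Note: direct block chaining via goto_tb is only used when the
 * branch target lies in the same guest page as the TB (or the current
 * insn), since a remapping of that page could otherwise leave stale
 * direct jumps in place; everything else takes the slower
 * lookup-and-goto-ptr path in gen_goto_tb() below.
 */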

static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}

/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
    }
}

static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
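
/*
 * gen_mulxy() is the 16x16->32 signed multiply used by the SMULxy
 * family: x and y select the top (nonzero) or bottom (zero) signed
 * halfword of t0 and t1 respectively before the 32-bit multiply.
 */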

/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality */
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
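
/*
 * The four flag bits passed in correspond to the MSR field-mask
 * letters c, x, s, f (insn bits 16-19), so e.g. "msr cpsr_fc, r0"
 * gives flags = 0b1001 and a raw mask of 0xff0000ff before the
 * feature- and mode-dependent trimming above.
 */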

/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}

static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /*
         * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
         * (and so we can forbid accesses from EL2 or below). elr_hyp
         * can be accessed also from Hyp mode, so forbid accesses from
         * EL0 or EL1.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
            (s->current_el < 3 && *regno != 17)) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
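
/*
 * Example: "mrs r0, SPSR_fiq" is encoded with r = 1, sysm = 0xe, so
 * the decode above produces tgtmode = ARM_CPU_MODE_FIQ, regno = 16,
 * and the actual banked-register access is left to the runtime
 * helpers invoked below.
 */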
3493
3494static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
3495{
3496 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
3497 int tgtmode = 0, regno = 0;
3498
3499 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
3500 return;
3501 }
3502
3503 /* Sync state because msr_banked() can raise exceptions */
3504 gen_set_condexec(s);
3505 gen_set_pc_im(s, s->pc - 4);
3506 tcg_reg = load_reg(s, rn);
3507 tcg_tgtmode = tcg_const_i32(tgtmode);
3508 tcg_regno = tcg_const_i32(regno);
3509 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
3510 tcg_temp_free_i32(tcg_tgtmode);
3511 tcg_temp_free_i32(tcg_regno);
3512 tcg_temp_free_i32(tcg_reg);
dcba3a8d 3513 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
3514}

static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}

/* Store value to PC as for an exception return (i.e. don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}

/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}

#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  tcg_gen_smax_i32
#define gen_helper_neon_pmax_u32  tcg_gen_umax_i32
#define gen_helper_neon_pmin_s32  tcg_gen_smin_i32
#define gen_helper_neon_pmin_u32  tcg_gen_umin_i32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
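
/*
 * Illustrative expansion (added note): with size == 1 and u == 0,
 * (size << 1) | u == 2, so GEN_NEON_INTEGER_OP(hadd) emits a call to
 * gen_helper_neon_hadd_s16(tmp, tmp, tmp2).  The _ENV variant differs
 * only in threading cpu_env through, for helpers that may need to set
 * the QC (saturation) flag in the CPU state.
 */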

static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}

static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qunzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qunzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_unzip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}

static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_zip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}

static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
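
/*
 * Worked example (added note): with t0 = 0x44332211 and t1 = 0x88776655
 * on entry, the masked shifts above gather the even-numbered bytes of
 * both inputs into t0 and the odd-numbered bytes into t1, leaving
 * t0 = 0x33771155 and t1 = 0x44882266 on exit.
 */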

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}


static struct {
    int nregs;
    int interleave;
    int spacing;
} const neon_ls_element_type[11] = {
    {1, 4, 1},
    {1, 4, 2},
    {4, 1, 1},
    {2, 2, 2},
    {1, 3, 1},
    {1, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {1, 2, 1},
    {1, 2, 2},
    {2, 1, 1}
};
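
/*
 * Illustrative decode (added note; the op-to-mnemonic mapping is our
 * reading of the ARM ARM load/store-multiple table): op == 2 selects
 * {4, 1, 1}, i.e. a four-register VLD1/VST1 with no interleaving,
 * while op == 0 selects {1, 4, 1}, a VLD4/VST4-style transfer that
 * interleaves each element group across four consecutive D registers.
 * The loop in disas_neon_ls_insn() below walks nregs register groups
 * and steps the target register by 'spacing' per interleaved element.
 */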

/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int load;
    int n;
    int vec_size;
    int mmu_idx;
    TCGMemOp endian;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        return 1;
    }
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    endian = s->be_data;
    mmu_idx = get_mem_index(s);
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10) {
            return 1;
        }
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1) {
            return 1;
        }
        /* For our purposes, bytes are always little-endian.  */
        if (size == 0) {
            endian = MO_LE;
        }
        /* Consecutive little-endian elements from a single register
         * can be promoted to a larger little-endian operation.
         */
        if (interleave == 1 && endian == MO_LE) {
            size = 3;
        }
        tmp64 = tcg_temp_new_i64();
        addr = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(1 << size);
        load_reg_var(s, addr, rn);
        for (reg = 0; reg < nregs; reg++) {
            for (n = 0; n < 8 >> size; n++) {
                int xs;
                for (xs = 0; xs < interleave; xs++) {
                    int tt = rd + reg + spacing * xs;

                    if (load) {
                        gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
                        neon_store_element64(tt, n, size, tmp64);
                    } else {
                        neon_load_element64(tmp64, tt, n, size);
                        gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
                    }
                    tcg_gen_add_i32(addr, addr, tmp2);
                }
            }
        }
        tcg_temp_free_i32(addr);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i64(tmp64);
        stride = nregs * interleave * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);

            /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
             * VLD2/3/4 to all lanes: bit 5 indicates register stride.
             */
            stride = (insn & (1 << 5)) ? 2 : 1;
            vec_size = nregs == 1 ? stride * 8 : 8;

            tmp = tcg_temp_new_i32();
            for (reg = 0; reg < nregs; reg++) {
                gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                                s->be_data | size);
                if ((rd & 1) && vec_size == 16) {
                    /* We cannot write 16 bytes at once because the
                     * destination is unaligned.
                     */
                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
                                         8, 8, tmp);
                    tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
                                     neon_reg_offset(rd, 0), 8, 8);
                } else {
                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
                                         vec_size, vec_size, tmp);
                }
                tcg_gen_addi_i32(addr, addr, 1 << size);
                rd += stride;
            }
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            int reg_idx;
            switch (size) {
            case 0:
                reg_idx = (insn >> 5) & 7;
                stride = 1;
                break;
            case 1:
                reg_idx = (insn >> 6) & 3;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                reg_idx = (insn >> 7) & 1;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            tmp = tcg_temp_new_i32();
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                    neon_store_element(rd, reg_idx, size, tmp);
                } else { /* Store */
                    neon_load_element(tmp, rd, reg_idx, size);
                    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            tcg_temp_free_i32(tmp);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
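
/*
 * Worked example (added note; the mnemonic mapping is our inference):
 * "vld1.8 {d0-d3}, [r1]!" has op == 2, size == 0 and rm == 13.
 * size 0 forces endian = MO_LE, and since interleave == 1 the access
 * is promoted to size 3, so the loop above issues four 8-byte
 * little-endian loads.  rm == 13 requests base writeback, so
 * stride = 4 * 1 * 8 = 32 is added back into r1; rm == 15 would mean
 * no writeback at all.
 */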

static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
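
/*
 * Added note: as used by the 2-reg-misc decode below, op != 0 selects
 * the VMOVN group (plain VMOVN for u == 0, VQMOVUN for u == 1) and
 * op == 0 selects signed/unsigned VQMOVN.  Worked example: a signed
 * saturating narrow of the 64-bit value 0x1_2345_6789 at size == 2
 * exceeds INT32_MAX, so it produces 0x7fffffff and sets QC.
 */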

/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
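
/*
 * Added note: each entry above is a bitmask of permitted size values,
 * checked in disas_neon_data_insn() as (neon_3r_sizes[op] & (1 << size)).
 * For example [NEON_3R_VQDMULH_VQRDMULH] = 0x6 permits only sizes 1
 * and 2 (16- and 32-bit elements); a size 0 encoding UNDEFs because
 * bit 0 is clear.
 */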

/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};


/* Expand v8.1 simd helper.  */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}

static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_ssra[] = {
    INDEX_op_sari_vec, INDEX_op_add_vec, 0
};

const GVecGen2i ssra_op[4] = {
    { .fni8 = gen_ssra8_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_8 },
    { .fni8 = gen_ssra16_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_16 },
    { .fni4 = gen_ssra32_i32,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_32 },
    { .fni8 = gen_ssra64_i64,
      .fniv = gen_ssra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_ssra,
      .load_dest = true,
      .vece = MO_64 },
};
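
/*
 * Usage sketch (added note): tables like ssra_op are consumed via the
 * generic gvec expander, e.g. for a VSRA by 'shift' at element size
 * 'size' over vec_size bytes, roughly:
 *
 *   tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
 *                   shift, &ssra_op[size]);
 *
 * The expander uses .fniv when the backend supports the vector opcodes
 * listed in .opt_opc, and otherwise falls back to the .fni8/.fni4
 * scalar expansions; .load_dest = true makes the old contents of the
 * destination available as the accumulator input.
 */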

static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_usra[] = {
    INDEX_op_shri_vec, INDEX_op_add_vec, 0
};

const GVecGen2i usra_op[4] = {
    { .fni8 = gen_usra8_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_8, },
    { .fni8 = gen_usra16_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_16, },
    { .fni4 = gen_usra32_i32,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_32, },
    { .fni8 = gen_usra64_i64,
      .fniv = gen_usra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_64, },
};

static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
        tcg_gen_shri_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };

const GVecGen2i sri_op[4] = {
    { .fni8 = gen_shr8_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_8 },
    { .fni8 = gen_shr16_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_16 },
    { .fni4 = gen_shr32_ins_i32,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_32 },
    { .fni8 = gen_shr64_ins_i64,
      .fniv = gen_shr_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_64 },
};
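
/*
 * Worked example (added note): for a byte-wise SRI with shift == 3,
 * dup_const(MO_8, 0xff >> 3) gives the mask 0x1f1f1f1f1f1f1f1f; each
 * byte of the shifted source is masked to its low 5 bits and inserted
 * into the destination, whose top 3 bits per byte are preserved (SRI
 * leaves untouched the bits that the right shift vacates).
 */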

static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
        tcg_gen_shli_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };

const GVecGen2i sli_op[4] = {
    { .fni8 = gen_shl8_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_8 },
    { .fni8 = gen_shl16_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_16 },
    { .fni4 = gen_shl32_ins_i32,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_32 },
    { .fni8 = gen_shl64_ins_i64,
      .fniv = gen_shl_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_64 },
};
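
/*
 * Added note: SLI is the mirror image of SRI.  For the 32-bit case,
 * gen_shl32_ins_i32 with shift == 8 emits deposit(d, d, a, 8, 24):
 * bits [31:8] of the result come from a << 8 while the destination's
 * low 8 bits (the bits the left shift vacates) are preserved.
 */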

static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}

/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */

static const TCGOpcode vecop_list_mla[] = {
    INDEX_op_mul_vec, INDEX_op_add_vec, 0
};

static const TCGOpcode vecop_list_mls[] = {
    INDEX_op_mul_vec, INDEX_op_sub_vec, 0
};

const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_64 },
};

const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_64 },
};

/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };

const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_64 },
};
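
/*
 * Worked example (added note): CMTST sets each result element to all
 * ones when (a & b) != 0 and to zero otherwise.  E.g. a = 0x10 and
 * b = 0x30 give d = ~0: the setcond result (0 or 1) is negated to
 * form the element-wide mask.
 */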

static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqadd[] = {
    INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_64 },
};

static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqadd[] = {
    INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqsub[] = {
    INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqsub[] = {
    INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_64 },
};
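
/*
 * Added note on the four saturating-op tables above: each .fniv
 * computes both the wrapped result (tcg_gen_add/sub_vec into x) and
 * the saturated result (usadd/ssadd/ussub/sssub into t); the two
 * differ exactly in the elements where saturation occurred, and that
 * comparison is OR-ed into the QC ('sat') vector which .write_aofs
 * exposes at offsetof(CPUARMState, vfp.qc), as the callers below pass.
 */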

/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int vec_size;
    uint32_t imm;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_ptr ptr1, ptr2, ptr3;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        return 1;
    }
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    vec_size = q ? 16 : 8;
    rd_ofs = neon_reg_offset(rd, 0);
    rn_ofs = neon_reg_offset(rn, 0);
    rm_ofs = neon_reg_offset(rm, 0);

    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        switch (op) {
        case NEON_3R_SHA:
            /* The SHA-1/SHA-256 3-register instructions require special
             * treatment here, as their size field is overloaded as an
             * op type selector, and they all consume their input in a
             * single pass.
             */
            if (!q) {
                return 1;
            }
            if (!u) { /* SHA-1 */
                if (!dc_isar_feature(aa32_sha1, s)) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                tmp4 = tcg_const_i32(size);
                gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
                tcg_temp_free_i32(tmp4);
            } else { /* SHA-256 */
                if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                switch (size) {
                case 0:
                    gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
                    break;
                case 1:
                    gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
                    break;
                case 2:
                    gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
                    break;
                }
            }
            tcg_temp_free_ptr(ptr1);
            tcg_temp_free_ptr(ptr2);
            tcg_temp_free_ptr(ptr3);
            return 0;

        case NEON_3R_VPADD_VQRDMLAH:
            if (!u) {
                break;  /* VPADD */
            }
            /* VQRDMLAH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_VFM_VQRDMLSH:
            if (!u) {
                /* VFM, VFMS */
                if (size == 1) {
                    return 1;
                }
                break;
            }
            /* VQRDMLSH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_LOGIC: /* Logic ops.  */
            switch ((u << 2) | size) {
            case 0: /* VAND */
                tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 1: /* VBIC */
                tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
                break;
            case 2: /* VORR */
                tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
                                vec_size, vec_size);
                break;
            case 3: /* VORN */
                tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 4: /* VEOR */
                tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 5: /* VBSL */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
                                    vec_size, vec_size);
                break;
            case 6: /* VBIT */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
                                    vec_size, vec_size);
                break;
            case 7: /* VBIF */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
                                    vec_size, vec_size);
                break;
            }
            return 0;

        case NEON_3R_VADD_VSUB:
            if (u) {
                tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else {
                tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            }
            return 0;

        case NEON_3R_VQADD:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqadd_op : sqadd_op) + size);
            return 0;

        case NEON_3R_VQSUB:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqsub_op : sqsub_op) + size);
            return 0;

        case NEON_3R_VMUL: /* VMUL */
            if (u) {
                /* Polynomial case allows only P8 and is handled below.  */
                if (size != 0) {
                    return 1;
                }
            } else {
                tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                return 0;
            }
            break;

        case NEON_3R_VML: /* VMLA, VMLS */
            tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
                           u ? &mls_op[size] : &mla_op[size]);
            return 0;

        case NEON_3R_VTST_VCEQ:
            if (u) { /* VCEQ */
                tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else { /* VTST */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &cmtst_op[size]);
            }
            return 0;

        case NEON_3R_VCGT:
            tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VCGE:
            tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VMAX:
            if (u) {
                tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        case NEON_3R_VMIN:
            if (u) {
                tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        }

        if (size == 3) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
25f84f79 5436 pairwise = 0;
9ee6e8bb 5437 switch (op) {
62698be3
PM
5438 case NEON_3R_VSHL:
5439 case NEON_3R_VQSHL:
5440 case NEON_3R_VRSHL:
5441 case NEON_3R_VQRSHL:
9ee6e8bb 5442 {
ad69471c
PB
5443 int rtmp;
5444 /* Shift instruction operands are reversed. */
5445 rtmp = rn;
9ee6e8bb 5446 rn = rm;
ad69471c 5447 rm = rtmp;
9ee6e8bb 5448 }
2c0262af 5449 break;
36a71934 5450 case NEON_3R_VPADD_VQRDMLAH:
62698be3
PM
5451 case NEON_3R_VPMAX:
5452 case NEON_3R_VPMIN:
9ee6e8bb 5453 pairwise = 1;
2c0262af 5454 break;
25f84f79
PM
5455 case NEON_3R_FLOAT_ARITH:
5456 pairwise = (u && size < 2); /* if VPADD (float) */
5457 break;
5458 case NEON_3R_FLOAT_MINMAX:
5459 pairwise = u; /* if VPMIN/VPMAX (float) */
5460 break;
5461 case NEON_3R_FLOAT_CMP:
5462 if (!u && size) {
5463 /* no encoding for U=0 C=1x */
5464 return 1;
5465 }
5466 break;
5467 case NEON_3R_FLOAT_ACMP:
5468 if (!u) {
5469 return 1;
5470 }
5471 break;
505935fc
WN
5472 case NEON_3R_FLOAT_MISC:
5473 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5474 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5475 return 1;
5476 }
2c0262af 5477 break;
36a71934
RH
5478 case NEON_3R_VFM_VQRDMLSH:
5479 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
5480 return 1;
5481 }
5482 break;
9ee6e8bb 5483 default:
2c0262af 5484 break;
9ee6e8bb 5485 }
dd8fbd78 5486
25f84f79
PM
5487 if (pairwise && q) {
5488 /* All the pairwise insns UNDEF if Q is set */
5489 return 1;
5490 }
5491
9ee6e8bb
PB
5492 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5493
5494 if (pairwise) {
5495 /* Pairwise. */
a5a14945
JR
5496 if (pass < 1) {
5497 tmp = neon_load_reg(rn, 0);
5498 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5499 } else {
a5a14945
JR
5500 tmp = neon_load_reg(rm, 0);
5501 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5502 }
5503 } else {
5504 /* Elementwise. */
dd8fbd78
FN
5505 tmp = neon_load_reg(rn, pass);
5506 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5507 }
5508 switch (op) {
62698be3 5509 case NEON_3R_VHADD:
9ee6e8bb
PB
5510 GEN_NEON_INTEGER_OP(hadd);
5511 break;
62698be3 5512 case NEON_3R_VRHADD:
9ee6e8bb 5513 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5514 break;
62698be3 5515 case NEON_3R_VHSUB:
9ee6e8bb
PB
5516 GEN_NEON_INTEGER_OP(hsub);
5517 break;
62698be3 5518 case NEON_3R_VSHL:
ad69471c 5519 GEN_NEON_INTEGER_OP(shl);
2c0262af 5520 break;
62698be3 5521 case NEON_3R_VQSHL:
02da0b2d 5522 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5523 break;
62698be3 5524 case NEON_3R_VRSHL:
ad69471c 5525 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5526 break;
62698be3 5527 case NEON_3R_VQRSHL:
02da0b2d 5528 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5529 break;
62698be3 5530 case NEON_3R_VABD:
9ee6e8bb
PB
5531 GEN_NEON_INTEGER_OP(abd);
5532 break;
62698be3 5533 case NEON_3R_VABA:
9ee6e8bb 5534 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5535 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5536 tmp2 = neon_load_reg(rd, pass);
5537 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5538 break;
62698be3 5539 case NEON_3R_VMUL:
82083184
RH
5540 /* VMUL.P8; other cases already eliminated. */
5541 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb 5542 break;
62698be3 5543 case NEON_3R_VPMAX:
9ee6e8bb
PB
5544 GEN_NEON_INTEGER_OP(pmax);
5545 break;
62698be3 5546 case NEON_3R_VPMIN:
9ee6e8bb
PB
5547 GEN_NEON_INTEGER_OP(pmin);
5548 break;
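/* Note (editor, illustrative): VQDMULH/VQRDMULH return the high half
 * of a saturating doubling multiply; for 16-bit lanes,
 * res = sat16((2 * a * b) >> 16), with the rounding form adding
 * (1 << 15) before the shift.
 */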
62698be3 5549 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5550 if (!u) { /* VQDMULH */
5551 switch (size) {
02da0b2d
PM
5552 case 1:
5553 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5554 break;
5555 case 2:
5556 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5557 break;
62698be3 5558 default: abort();
9ee6e8bb 5559 }
62698be3 5560 } else { /* VQRDMULH */
9ee6e8bb 5561 switch (size) {
02da0b2d
PM
5562 case 1:
5563 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5564 break;
5565 case 2:
5566 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5567 break;
62698be3 5568 default: abort();
9ee6e8bb
PB
5569 }
5570 }
5571 break;
36a71934 5572 case NEON_3R_VPADD_VQRDMLAH:
9ee6e8bb 5573 switch (size) {
dd8fbd78
FN
5574 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5575 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5576 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5577 default: abort();
9ee6e8bb
PB
5578 }
5579 break;
62698be3 5580 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5581 {
5582 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5583 switch ((u << 2) | size) {
5584 case 0: /* VADD */
aa47cfdd
PM
5585 case 4: /* VPADD */
5586 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5587 break;
5588 case 2: /* VSUB */
aa47cfdd 5589 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5590 break;
5591 case 6: /* VABD */
aa47cfdd 5592 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5593 break;
5594 default:
62698be3 5595 abort();
9ee6e8bb 5596 }
aa47cfdd 5597 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5598 break;
aa47cfdd 5599 }
62698be3 5600 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5601 {
5602 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5603 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5604 if (!u) {
7d1b0095 5605 tcg_temp_free_i32(tmp2);
dd8fbd78 5606 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5607 if (size == 0) {
aa47cfdd 5608 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5609 } else {
aa47cfdd 5610 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5611 }
5612 }
aa47cfdd 5613 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5614 break;
aa47cfdd 5615 }
62698be3 5616 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5617 {
5618 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5619 if (!u) {
aa47cfdd 5620 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5621 } else {
aa47cfdd
PM
5622 if (size == 0) {
5623 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5624 } else {
5625 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5626 }
b5ff1b31 5627 }
aa47cfdd 5628 tcg_temp_free_ptr(fpstatus);
2c0262af 5629 break;
aa47cfdd 5630 }
62698be3 5631 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5632 {
5633 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5634 if (size == 0) {
5635 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5636 } else {
5637 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5638 }
5639 tcg_temp_free_ptr(fpstatus);
2c0262af 5640 break;
aa47cfdd 5641 }
62698be3 5642 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5643 {
5644 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5645 if (size == 0) {
f71a2ae5 5646 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5647 } else {
f71a2ae5 5648 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5649 }
5650 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5651 break;
aa47cfdd 5652 }
505935fc
WN
5653 case NEON_3R_FLOAT_MISC:
5654 if (u) {
5655 /* VMAXNM/VMINNM */
5656 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5657 if (size == 0) {
f71a2ae5 5658 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5659 } else {
f71a2ae5 5660 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5661 }
5662 tcg_temp_free_ptr(fpstatus);
5663 } else {
5664 if (size == 0) {
5665 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5666 } else {
5667 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5668 }
5669 }
2c0262af 5670 break;
36a71934 5671 case NEON_3R_VFM_VQRDMLSH:
da97f52c
PM
5672 {
5673 /* VFMA, VFMS: fused multiply-add */
5674 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5675 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5676 if (size) {
5677 /* VFMS */
5678 gen_helper_vfp_negs(tmp, tmp);
5679 }
5680 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5681 tcg_temp_free_i32(tmp3);
5682 tcg_temp_free_ptr(fpstatus);
5683 break;
5684 }
9ee6e8bb
PB
5685 default:
5686 abort();
2c0262af 5687 }
7d1b0095 5688 tcg_temp_free_i32(tmp2);
dd8fbd78 5689
9ee6e8bb
PB
5690 /* Save the result. For elementwise operations we can put it
5691 straight into the destination register. For pairwise operations
5692 we have to be careful to avoid clobbering the source operands. */
5693 if (pairwise && rd == rm) {
dd8fbd78 5694 neon_store_scratch(pass, tmp);
9ee6e8bb 5695 } else {
dd8fbd78 5696 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5697 }
5698
5699 } /* for pass */
5700 if (pairwise && rd == rm) {
5701 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5702 tmp = neon_load_scratch(pass);
5703 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5704 }
5705 }
ad69471c 5706 /* End of 3 register same size operations. */
9ee6e8bb
PB
5707 } else if (insn & (1 << 4)) {
5708 if ((insn & 0x00380080) != 0) {
5709 /* Two registers and shift. */
5710 op = (insn >> 8) & 0xf;
5711 if (insn & (1 << 7)) {
cc13115b
PM
5712 /* 64-bit shift. */
5713 if (op > 7) {
5714 return 1;
5715 }
9ee6e8bb
PB
5716 size = 3;
5717 } else {
5718 size = 2;
5719 while ((insn & (1 << (size + 19))) == 0)
5720 size--;
5721 }
5722 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
9ee6e8bb
PB
5723 if (op < 8) {
5724 /* Shift by immediate:
5725 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5726 if (q && ((rd | rm) & 1)) {
5727 return 1;
5728 }
5729 if (!u && (op == 4 || op == 6)) {
5730 return 1;
5731 }
9ee6e8bb
PB
5732 /* Right shifts are encoded as N - shift, where N is the
5733 element size in bits. */
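/* e.g. (illustrative) VSHR.S8 #3 is encoded with imm6 = 13: for
 * size == 0 the field above masks to 5, and 5 - 8 == -3 below.
 */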
1dc8425e 5734 if (op <= 4) {
9ee6e8bb 5735 shift = shift - (1 << (size + 3));
1dc8425e
RH
5736 }
5737
5738 switch (op) {
5739 case 0: /* VSHR */
5740 /* Right shift comes here negative. */
5741 shift = -shift;
5742 /* Shifts larger than the element size are architecturally
5743 * valid. An unsigned shift yields all zeros; a signed
5744 * shift yields all sign bits.
5745 */
5746 if (!u) {
5747 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
5748 MIN(shift, (8 << size) - 1),
5749 vec_size, vec_size);
5750 } else if (shift >= 8 << size) {
5751 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5752 } else {
5753 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
5754 vec_size, vec_size);
5755 }
5756 return 0;
5757
41f6c113
RH
5758 case 1: /* VSRA */
5759 /* Right shift comes here negative. */
5760 shift = -shift;
5761 /* Shifts larger than the element size are architecturally
5762 * valid. An unsigned shift yields all zeros; a signed
5763 * shift yields all sign bits.
5764 */
5765 if (!u) {
5766 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5767 MIN(shift, (8 << size) - 1),
5768 &ssra_op[size]);
5769 } else if (shift >= 8 << size) {
5770 /* rd += 0 */
5771 } else {
5772 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5773 shift, &usra_op[size]);
5774 }
5775 return 0;
5776
f3cd8218
RH
5777 case 4: /* VSRI */
5778 if (!u) {
5779 return 1;
5780 }
5781 /* Right shift comes here negative. */
5782 shift = -shift;
5783 /* Shift out of range leaves destination unchanged. */
5784 if (shift < 8 << size) {
5785 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5786 shift, &sri_op[size]);
5787 }
5788 return 0;
5789
1dc8425e 5790 case 5: /* VSHL, VSLI */
f3cd8218
RH
5791 if (u) { /* VSLI */
5792 /* Shift out of range leaves destination unchanged. */
5793 if (shift < 8 << size) {
5794 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
5795 vec_size, shift, &sli_op[size]);
5796 }
5797 } else { /* VSHL */
1dc8425e
RH
5798 /* Shifts larger than the element size are
5799 * architecturally valid and result in zero.
5800 */
5801 if (shift >= 8 << size) {
5802 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5803 } else {
5804 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
5805 vec_size, vec_size);
5806 }
1dc8425e 5807 }
f3cd8218 5808 return 0;
1dc8425e
RH
5809 }
5810
9ee6e8bb
PB
5811 if (size == 3) {
5812 count = q + 1;
5813 } else {
5814 count = q ? 4: 2;
5815 }
1dc8425e
RH
5816
5817 /* To avoid excessive duplication of ops we implement shift
5818 * by immediate using the variable shift operations.
5819 */
5820 imm = dup_const(size, shift);
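/* dup_const() replicates the (possibly negative) shift count across a
 * 64-bit value at the given element size, e.g. (illustrative)
 * dup_const(MO_8, -2) == 0xfefefefefefefefe; the variable-shift
 * helpers interpret a negative per-element count as a right shift.
 */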
9ee6e8bb
PB
5821
5822 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5823 if (size == 3) {
5824 neon_load_reg64(cpu_V0, rm + pass);
5825 tcg_gen_movi_i64(cpu_V1, imm);
5826 switch (op) {
ad69471c
PB
5827 case 2: /* VRSHR */
5828 case 3: /* VRSRA */
5829 if (u)
5830 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5831 else
ad69471c 5832 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5833 break;
0322b26e 5834 case 6: /* VQSHLU */
02da0b2d
PM
5835 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5836 cpu_V0, cpu_V1);
ad69471c 5837 break;
0322b26e
PM
5838 case 7: /* VQSHL */
5839 if (u) {
02da0b2d 5840 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5841 cpu_V0, cpu_V1);
5842 } else {
02da0b2d 5843 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5844 cpu_V0, cpu_V1);
5845 }
9ee6e8bb 5846 break;
1dc8425e
RH
5847 default:
5848 g_assert_not_reached();
9ee6e8bb 5849 }
41f6c113 5850 if (op == 3) {
ad69471c 5851 /* Accumulate. */
5371cb81 5852 neon_load_reg64(cpu_V1, rd + pass);
ad69471c 5853 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5854 }
5855 neon_store_reg64(cpu_V0, rd + pass);
5856 } else { /* size < 3 */
5857 /* Operands in tmp and tmp2. */
dd8fbd78 5858 tmp = neon_load_reg(rm, pass);
7d1b0095 5859 tmp2 = tcg_temp_new_i32();
dd8fbd78 5860 tcg_gen_movi_i32(tmp2, imm);
ad69471c 5861 switch (op) {
ad69471c
PB
5862 case 2: /* VRSHR */
5863 case 3: /* VRSRA */
5864 GEN_NEON_INTEGER_OP(rshl);
5865 break;
0322b26e 5866 case 6: /* VQSHLU */
ad69471c 5867 switch (size) {
0322b26e 5868 case 0:
02da0b2d
PM
5869 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5870 tmp, tmp2);
0322b26e
PM
5871 break;
5872 case 1:
02da0b2d
PM
5873 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5874 tmp, tmp2);
0322b26e
PM
5875 break;
5876 case 2:
02da0b2d
PM
5877 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5878 tmp, tmp2);
0322b26e
PM
5879 break;
5880 default:
cc13115b 5881 abort();
ad69471c
PB
5882 }
5883 break;
0322b26e 5884 case 7: /* VQSHL */
02da0b2d 5885 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5886 break;
1dc8425e
RH
5887 default:
5888 g_assert_not_reached();
ad69471c 5889 }
7d1b0095 5890 tcg_temp_free_i32(tmp2);
ad69471c 5891
41f6c113 5892 if (op == 3) {
ad69471c 5893 /* Accumulate. */
dd8fbd78 5894 tmp2 = neon_load_reg(rd, pass);
5371cb81 5895 gen_neon_add(size, tmp, tmp2);
7d1b0095 5896 tcg_temp_free_i32(tmp2);
ad69471c 5897 }
dd8fbd78 5898 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5899 }
5900 } /* for pass */
5901 } else if (op < 10) {
ad69471c 5902 /* Shift by immediate and narrow:
9ee6e8bb 5903 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5904 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5905 if (rm & 1) {
5906 return 1;
5907 }
9ee6e8bb
PB
5908 shift = shift - (1 << (size + 3));
5909 size++;
92cdfaeb 5910 if (size == 3) {
a7812ae4 5911 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5912 neon_load_reg64(cpu_V0, rm);
5913 neon_load_reg64(cpu_V1, rm + 1);
5914 for (pass = 0; pass < 2; pass++) {
5915 TCGv_i64 in;
5916 if (pass == 0) {
5917 in = cpu_V0;
5918 } else {
5919 in = cpu_V1;
5920 }
ad69471c 5921 if (q) {
0b36f4cd 5922 if (input_unsigned) {
92cdfaeb 5923 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5924 } else {
92cdfaeb 5925 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5926 }
ad69471c 5927 } else {
0b36f4cd 5928 if (input_unsigned) {
92cdfaeb 5929 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5930 } else {
92cdfaeb 5931 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5932 }
ad69471c 5933 }
7d1b0095 5934 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5935 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5936 neon_store_reg(rd, pass, tmp);
5937 } /* for pass */
5938 tcg_temp_free_i64(tmp64);
5939 } else {
5940 if (size == 1) {
5941 imm = (uint16_t)shift;
5942 imm |= imm << 16;
2c0262af 5943 } else {
92cdfaeb
PM
5944 /* size == 2 */
5945 imm = (uint32_t)shift;
5946 }
5947 tmp2 = tcg_const_i32(imm);
5948 tmp4 = neon_load_reg(rm + 1, 0);
5949 tmp5 = neon_load_reg(rm + 1, 1);
5950 for (pass = 0; pass < 2; pass++) {
5951 if (pass == 0) {
5952 tmp = neon_load_reg(rm, 0);
5953 } else {
5954 tmp = tmp4;
5955 }
0b36f4cd
CL
5956 gen_neon_shift_narrow(size, tmp, tmp2, q,
5957 input_unsigned);
92cdfaeb
PM
5958 if (pass == 0) {
5959 tmp3 = neon_load_reg(rm, 1);
5960 } else {
5961 tmp3 = tmp5;
5962 }
0b36f4cd
CL
5963 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5964 input_unsigned);
36aa55dc 5965 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5966 tcg_temp_free_i32(tmp);
5967 tcg_temp_free_i32(tmp3);
5968 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5969 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5970 neon_store_reg(rd, pass, tmp);
5971 } /* for pass */
c6067f04 5972 tcg_temp_free_i32(tmp2);
b75263d6 5973 }
9ee6e8bb 5974 } else if (op == 10) {
cc13115b
PM
5975 /* VSHLL, VMOVL */
5976 if (q || (rd & 1)) {
9ee6e8bb 5977 return 1;
cc13115b 5978 }
ad69471c
PB
5979 tmp = neon_load_reg(rm, 0);
5980 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5981 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5982 if (pass == 1)
5983 tmp = tmp2;
5984
5985 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5986
9ee6e8bb
PB
5987 if (shift != 0) {
5988 /* The shift is less than the width of the source
ad69471c
PB
5989 type, so we can just shift the whole register. */
5990 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5991 /* Widen the result of shift: we need to clear
5992 * the potential overflow bits resulting from
5993 * left bits of the narrow input appearing as
5994 * right bits of the left neighbour narrow
5995 * input. */
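/* Worked example (editor, illustrative): VSHLL.S8 with shift == 3
 * gives imm = 0xff >> 5 = 0x07, so imm64 == 0x0007000700070007: the
 * low three bits of each 16-bit lane caught the neighbour's
 * shifted-out bits and are cleared by the andi below.
 */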
ad69471c
PB
5996 if (size < 2 || !u) {
5997 uint64_t imm64;
5998 if (size == 0) {
5999 imm = (0xffu >> (8 - shift));
6000 imm |= imm << 16;
acdf01ef 6001 } else if (size == 1) {
ad69471c 6002 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6003 } else {
6004 /* size == 2 */
6005 imm = 0xffffffff >> (32 - shift);
6006 }
6007 if (size < 2) {
6008 imm64 = imm | (((uint64_t)imm) << 32);
6009 } else {
6010 imm64 = imm;
9ee6e8bb 6011 }
acdf01ef 6012 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6013 }
6014 }
ad69471c 6015 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6016 }
f73534a5 6017 } else if (op >= 14) {
9ee6e8bb 6018 /* VCVT fixed-point. */
cc13115b
PM
6019 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6020 return 1;
6021 }
f73534a5
PM
6022 /* We have already masked out the must-be-1 top bit of imm6,
6023 * hence this 32-shift where the ARM ARM has 64-imm6.
6024 */
6025 shift = 32 - shift;
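/* e.g. (illustrative) imm6 = 0b101101 (45): the ARM ARM computes
 * fracbits = 64 - 45 = 19; with the top bit already masked out this
 * is 32 - 13 = 19.
 */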
9ee6e8bb 6026 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6027 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6028 if (!(op & 1)) {
9ee6e8bb 6029 if (u)
5500b06c 6030 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6031 else
5500b06c 6032 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6033 } else {
6034 if (u)
5500b06c 6035 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6036 else
5500b06c 6037 gen_vfp_tosl(0, shift, 1);
2c0262af 6038 }
4373f3ce 6039 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6040 }
6041 } else {
9ee6e8bb
PB
6042 return 1;
6043 }
6044 } else { /* (insn & 0x00380080) == 0 */
246fa4ac
RH
6045 int invert, reg_ofs, vec_size;
6046
7d80fee5
PM
6047 if (q && (rd & 1)) {
6048 return 1;
6049 }
9ee6e8bb
PB
6050
6051 op = (insn >> 8) & 0xf;
6052 /* One register and immediate. */
6053 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6054 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6055 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6056 * We choose to not special-case this and will behave as if a
6057 * valid constant encoding of 0 had been given.
6058 */
9ee6e8bb
PB
6059 switch (op) {
6060 case 0: case 1:
6061 /* no-op */
6062 break;
6063 case 2: case 3:
6064 imm <<= 8;
6065 break;
6066 case 4: case 5:
6067 imm <<= 16;
6068 break;
6069 case 6: case 7:
6070 imm <<= 24;
6071 break;
6072 case 8: case 9:
6073 imm |= imm << 16;
6074 break;
6075 case 10: case 11:
6076 imm = (imm << 8) | (imm << 24);
6077 break;
6078 case 12:
8e31209e 6079 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6080 break;
6081 case 13:
6082 imm = (imm << 16) | 0xffff;
6083 break;
6084 case 14:
6085 imm |= (imm << 8) | (imm << 16) | (imm << 24);
246fa4ac 6086 if (invert) {
9ee6e8bb 6087 imm = ~imm;
246fa4ac 6088 }
9ee6e8bb
PB
6089 break;
6090 case 15:
7d80fee5
PM
6091 if (invert) {
6092 return 1;
6093 }
9ee6e8bb
PB
6094 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6095 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6096 break;
6097 }
246fa4ac 6098 if (invert) {
9ee6e8bb 6099 imm = ~imm;
246fa4ac 6100 }
9ee6e8bb 6101
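/* e.g. (editor, illustrative) op == 12 (cmode 1100) with imm8 == 0x12
 * expands to 0x000012ff in each 32-bit lane, before any inversion.
 */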
246fa4ac
RH
6102 reg_ofs = neon_reg_offset(rd, 0);
6103 vec_size = q ? 16 : 8;
6104
6105 if (op & 1 && op < 12) {
6106 if (invert) {
6107 /* The immediate value has already been inverted,
6108 * so BIC becomes AND.
6109 */
6110 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
6111 vec_size, vec_size);
9ee6e8bb 6112 } else {
246fa4ac
RH
6113 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
6114 vec_size, vec_size);
6115 }
6116 } else {
6117 /* VMOV, VMVN. */
6118 if (op == 14 && invert) {
6119 TCGv_i64 t64 = tcg_temp_new_i64();
6120
6121 for (pass = 0; pass <= q; ++pass) {
6122 uint64_t val = 0;
a5a14945 6123 int n;
246fa4ac
RH
6124
6125 for (n = 0; n < 8; n++) {
6126 if (imm & (1 << (n + pass * 8))) {
6127 val |= 0xffull << (n * 8);
6128 }
9ee6e8bb 6129 }
246fa4ac
RH
6130 tcg_gen_movi_i64(t64, val);
6131 neon_store_reg64(t64, rd + pass);
9ee6e8bb 6132 }
246fa4ac
RH
6133 tcg_temp_free_i64(t64);
6134 } else {
6135 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
9ee6e8bb
PB
6136 }
6137 }
6138 }
e4b3861d 6139 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6140 if (size != 3) {
6141 op = (insn >> 8) & 0xf;
6142 if ((insn & (1 << 6)) == 0) {
6143 /* Three registers of different lengths. */
6144 int src1_wide;
6145 int src2_wide;
6146 int prewiden;
526d0096
PM
6147 /* undefreq: bit 0 : UNDEF if size == 0
6148 * bit 1 : UNDEF if size == 1
6149 * bit 2 : UNDEF if size == 2
6150 * bit 3 : UNDEF if U == 1
6151 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6152 */
6153 int undefreq;
6154 /* prewiden, src1_wide, src2_wide, undefreq */
6155 static const int neon_3reg_wide[16][4] = {
6156 {1, 0, 0, 0}, /* VADDL */
6157 {1, 1, 0, 0}, /* VADDW */
6158 {1, 0, 0, 0}, /* VSUBL */
6159 {1, 1, 0, 0}, /* VSUBW */
6160 {0, 1, 1, 0}, /* VADDHN */
6161 {0, 0, 0, 0}, /* VABAL */
6162 {0, 1, 1, 0}, /* VSUBHN */
6163 {0, 0, 0, 0}, /* VABDL */
6164 {0, 0, 0, 0}, /* VMLAL */
526d0096 6165 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6166 {0, 0, 0, 0}, /* VMLSL */
526d0096 6167 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6168 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6169 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6170 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6171 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6172 };
6173
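/* e.g. VQDMLAL (op 9) has undefreq 9 == 0b1001: it UNDEFs for
 * size == 0 or U == 1, having no byte-sized or unsigned forms.
 */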
6174 prewiden = neon_3reg_wide[op][0];
6175 src1_wide = neon_3reg_wide[op][1];
6176 src2_wide = neon_3reg_wide[op][2];
695272dc 6177 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6178
526d0096
PM
6179 if ((undefreq & (1 << size)) ||
6180 ((undefreq & 8) && u)) {
695272dc
PM
6181 return 1;
6182 }
6183 if ((src1_wide && (rn & 1)) ||
6184 (src2_wide && (rm & 1)) ||
6185 (!src2_wide && (rd & 1))) {
ad69471c 6186 return 1;
695272dc 6187 }
ad69471c 6188
4e624eda
PM
6189 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6190 * outside the loop below as it only performs a single pass.
6191 */
6192 if (op == 14 && size == 2) {
6193 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6194
962fcbf2 6195 if (!dc_isar_feature(aa32_pmull, s)) {
4e624eda
PM
6196 return 1;
6197 }
6198 tcg_rn = tcg_temp_new_i64();
6199 tcg_rm = tcg_temp_new_i64();
6200 tcg_rd = tcg_temp_new_i64();
6201 neon_load_reg64(tcg_rn, rn);
6202 neon_load_reg64(tcg_rm, rm);
6203 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6204 neon_store_reg64(tcg_rd, rd);
6205 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6206 neon_store_reg64(tcg_rd, rd + 1);
6207 tcg_temp_free_i64(tcg_rn);
6208 tcg_temp_free_i64(tcg_rm);
6209 tcg_temp_free_i64(tcg_rd);
6210 return 0;
6211 }
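/* Reference semantics (editor's illustrative sketch, not translator
 * code): the 64x64->128 carry-less multiply XORs shifted copies of
 * one operand, selected by the set bits of the other:
 *
 *     uint64_t lo = 0, hi = 0;
 *     for (int i = 0; i < 64; i++) {
 *         if (n & (1ull << i)) {
 *             lo ^= m << i;
 *             hi ^= i ? m >> (64 - i) : 0;
 *         }
 *     }
 *
 * gen_helper_neon_pmull_64_lo/_hi return the two 64-bit halves.
 */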
6212
9ee6e8bb
PB
6213 /* Avoid overlapping operands. Wide source operands are
6214 always aligned so will never overlap with wide
6215 destinations in problematic ways. */
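/* e.g. with rd == rn and a narrow first source, pass 0 stores a
 * 64-bit result over rn before pass 1 reads its second 32-bit half,
 * hence the scratch copy below.
 */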
8f8e3aa4 6216 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6217 tmp = neon_load_reg(rm, 1);
6218 neon_store_scratch(2, tmp);
8f8e3aa4 6219 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6220 tmp = neon_load_reg(rn, 1);
6221 neon_store_scratch(2, tmp);
9ee6e8bb 6222 }
f764718d 6223 tmp3 = NULL;
9ee6e8bb 6224 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6225 if (src1_wide) {
6226 neon_load_reg64(cpu_V0, rn + pass);
f764718d 6227 tmp = NULL;
9ee6e8bb 6228 } else {
ad69471c 6229 if (pass == 1 && rd == rn) {
dd8fbd78 6230 tmp = neon_load_scratch(2);
9ee6e8bb 6231 } else {
ad69471c
PB
6232 tmp = neon_load_reg(rn, pass);
6233 }
6234 if (prewiden) {
6235 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6236 }
6237 }
ad69471c
PB
6238 if (src2_wide) {
6239 neon_load_reg64(cpu_V1, rm + pass);
f764718d 6240 tmp2 = NULL;
9ee6e8bb 6241 } else {
ad69471c 6242 if (pass == 1 && rd == rm) {
dd8fbd78 6243 tmp2 = neon_load_scratch(2);
9ee6e8bb 6244 } else {
ad69471c
PB
6245 tmp2 = neon_load_reg(rm, pass);
6246 }
6247 if (prewiden) {
6248 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6249 }
9ee6e8bb
PB
6250 }
6251 switch (op) {
6252 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6253 gen_neon_addl(size);
9ee6e8bb 6254 break;
79b0e534 6255 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6256 gen_neon_subl(size);
9ee6e8bb
PB
6257 break;
6258 case 5: case 7: /* VABAL, VABDL */
6259 switch ((size << 1) | u) {
ad69471c
PB
6260 case 0:
6261 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6262 break;
6263 case 1:
6264 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6265 break;
6266 case 2:
6267 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6268 break;
6269 case 3:
6270 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6271 break;
6272 case 4:
6273 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6274 break;
6275 case 5:
6276 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6277 break;
9ee6e8bb
PB
6278 default: abort();
6279 }
7d1b0095
PM
6280 tcg_temp_free_i32(tmp2);
6281 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6282 break;
6283 case 8: case 9: case 10: case 11: case 12: case 13:
6284 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6285 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6286 break;
6287 case 14: /* Polynomial VMULL */
e5ca24cb 6288 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6289 tcg_temp_free_i32(tmp2);
6290 tcg_temp_free_i32(tmp);
e5ca24cb 6291 break;
695272dc
PM
6292 default: /* 15 is RESERVED: caught earlier */
6293 abort();
9ee6e8bb 6294 }
ebcd88ce
PM
6295 if (op == 13) {
6296 /* VQDMULL */
6297 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6298 neon_store_reg64(cpu_V0, rd + pass);
6299 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6300 /* Accumulate. */
ebcd88ce 6301 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6302 switch (op) {
4dc064e6
PM
6303 case 10: /* VMLSL */
6304 gen_neon_negl(cpu_V0, size);
6305 /* Fall through */
6306 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6307 gen_neon_addl(size);
9ee6e8bb
PB
6308 break;
6309 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6310 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6311 if (op == 11) {
6312 gen_neon_negl(cpu_V0, size);
6313 }
ad69471c
PB
6314 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6315 break;
9ee6e8bb
PB
6316 default:
6317 abort();
6318 }
ad69471c 6319 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6320 } else if (op == 4 || op == 6) {
6321 /* Narrowing operation. */
7d1b0095 6322 tmp = tcg_temp_new_i32();
79b0e534 6323 if (!u) {
9ee6e8bb 6324 switch (size) {
ad69471c
PB
6325 case 0:
6326 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6327 break;
6328 case 1:
6329 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6330 break;
6331 case 2:
6332 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6333 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6334 break;
9ee6e8bb
PB
6335 default: abort();
6336 }
6337 } else {
6338 switch (size) {
ad69471c
PB
6339 case 0:
6340 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6341 break;
6342 case 1:
6343 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6344 break;
6345 case 2:
6346 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6347 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6348 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6349 break;
9ee6e8bb
PB
6350 default: abort();
6351 }
6352 }
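/* The rounded forms (VRADDHN/VRSUBHN, u set) add half of the
 * discarded low half, e.g. 1u << 31 when narrowing 64 to 32 bits,
 * before taking the high half.
 */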
ad69471c
PB
6353 if (pass == 0) {
6354 tmp3 = tmp;
6355 } else {
6356 neon_store_reg(rd, 0, tmp3);
6357 neon_store_reg(rd, 1, tmp);
6358 }
9ee6e8bb
PB
6359 } else {
6360 /* Write back the result. */
ad69471c 6361 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6362 }
6363 }
6364 } else {
3e3326df
PM
6365 /* Two registers and a scalar. NB that for ops of this form
6366 * the ARM ARM labels bit 24 as Q, but it is in our variable
6367 * 'u', not 'q'.
6368 */
6369 if (size == 0) {
6370 return 1;
6371 }
9ee6e8bb 6372 switch (op) {
9ee6e8bb 6373 case 1: /* Floating point VMLA scalar */
9ee6e8bb 6374 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6375 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6376 if (size == 1) {
6377 return 1;
6378 }
6379 /* fall through */
6380 case 0: /* Integer VMLA scalar */
6381 case 4: /* Integer VMLS scalar */
6382 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6383 case 12: /* VQDMULH scalar */
6384 case 13: /* VQRDMULH scalar */
3e3326df
PM
6385 if (u && ((rd | rn) & 1)) {
6386 return 1;
6387 }
dd8fbd78
FN
6388 tmp = neon_get_scalar(size, rm);
6389 neon_store_scratch(0, tmp);
9ee6e8bb 6390 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6391 tmp = neon_load_scratch(0);
6392 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6393 if (op == 12) {
6394 if (size == 1) {
02da0b2d 6395 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6396 } else {
02da0b2d 6397 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6398 }
6399 } else if (op == 13) {
6400 if (size == 1) {
02da0b2d 6401 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6402 } else {
02da0b2d 6403 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6404 }
6405 } else if (op & 1) {
aa47cfdd
PM
6406 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6407 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6408 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6409 } else {
6410 switch (size) {
dd8fbd78
FN
6411 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6412 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6413 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6414 default: abort();
9ee6e8bb
PB
6415 }
6416 }
7d1b0095 6417 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6418 if (op < 8) {
6419 /* Accumulate. */
dd8fbd78 6420 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6421 switch (op) {
6422 case 0:
dd8fbd78 6423 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6424 break;
6425 case 1:
aa47cfdd
PM
6426 {
6427 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6428 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6429 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6430 break;
aa47cfdd 6431 }
9ee6e8bb 6432 case 4:
dd8fbd78 6433 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6434 break;
6435 case 5:
aa47cfdd
PM
6436 {
6437 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6438 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6439 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6440 break;
aa47cfdd 6441 }
9ee6e8bb
PB
6442 default:
6443 abort();
6444 }
7d1b0095 6445 tcg_temp_free_i32(tmp2);
9ee6e8bb 6446 }
dd8fbd78 6447 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6448 }
6449 break;
9ee6e8bb 6450 case 3: /* VQDMLAL scalar */
9ee6e8bb 6451 case 7: /* VQDMLSL scalar */
9ee6e8bb 6452 case 11: /* VQDMULL scalar */
3e3326df 6453 if (u == 1) {
ad69471c 6454 return 1;
3e3326df
PM
6455 }
6456 /* fall through */
6457 case 2: /* VMLAL scalar */
6458 case 6: /* VMLSL scalar */
6459 case 10: /* VMULL scalar */
6460 if (rd & 1) {
6461 return 1;
6462 }
dd8fbd78 6463 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6464 /* We need a copy of tmp2 because gen_neon_mull
6465 * frees it during pass 0. */
7d1b0095 6466 tmp4 = tcg_temp_new_i32();
c6067f04 6467 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6468 tmp3 = neon_load_reg(rn, 1);
ad69471c 6469
9ee6e8bb 6470 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6471 if (pass == 0) {
6472 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6473 } else {
dd8fbd78 6474 tmp = tmp3;
c6067f04 6475 tmp2 = tmp4;
9ee6e8bb 6476 }
ad69471c 6477 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6478 if (op != 11) {
6479 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6480 }
9ee6e8bb 6481 switch (op) {
4dc064e6
PM
6482 case 6:
6483 gen_neon_negl(cpu_V0, size);
6484 /* Fall through */
6485 case 2:
ad69471c 6486 gen_neon_addl(size);
9ee6e8bb
PB
6487 break;
6488 case 3: case 7:
ad69471c 6489 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6490 if (op == 7) {
6491 gen_neon_negl(cpu_V0, size);
6492 }
ad69471c 6493 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6494 break;
6495 case 10:
6496 /* no-op */
6497 break;
6498 case 11:
ad69471c 6499 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6500 break;
6501 default:
6502 abort();
6503 }
ad69471c 6504 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6505 }
61adacc8
RH
6506 break;
6507 case 14: /* VQRDMLAH scalar */
6508 case 15: /* VQRDMLSH scalar */
6509 {
6510 NeonGenThreeOpEnvFn *fn;
dd8fbd78 6511
962fcbf2 6512 if (!dc_isar_feature(aa32_rdm, s)) {
61adacc8
RH
6513 return 1;
6514 }
6515 if (u && ((rd | rn) & 1)) {
6516 return 1;
6517 }
6518 if (op == 14) {
6519 if (size == 1) {
6520 fn = gen_helper_neon_qrdmlah_s16;
6521 } else {
6522 fn = gen_helper_neon_qrdmlah_s32;
6523 }
6524 } else {
6525 if (size == 1) {
6526 fn = gen_helper_neon_qrdmlsh_s16;
6527 } else {
6528 fn = gen_helper_neon_qrdmlsh_s32;
6529 }
6530 }
dd8fbd78 6531
61adacc8
RH
6532 tmp2 = neon_get_scalar(size, rm);
6533 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6534 tmp = neon_load_reg(rn, pass);
6535 tmp3 = neon_load_reg(rd, pass);
6536 fn(tmp, cpu_env, tmp, tmp2, tmp3);
6537 tcg_temp_free_i32(tmp3);
6538 neon_store_reg(rd, pass, tmp);
6539 }
6540 tcg_temp_free_i32(tmp2);
6541 }
9ee6e8bb 6542 break;
61adacc8
RH
6543 default:
6544 g_assert_not_reached();
9ee6e8bb
PB
6545 }
6546 }
6547 } else { /* size == 3 */
6548 if (!u) {
6549 /* Extract. */
9ee6e8bb 6550 imm = (insn >> 8) & 0xf;
ad69471c
PB
6551
6552 if (imm > 7 && !q)
6553 return 1;
6554
52579ea1
PM
6555 if (q && ((rd | rn | rm) & 1)) {
6556 return 1;
6557 }
6558
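/* e.g. (editor, illustrative) VEXT.8 d0, d1, d2, #3: d0's low five
 * bytes come from bytes [7:3] of d1 and its top three bytes from
 * bytes [2:0] of d2.
 */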
ad69471c
PB
6559 if (imm == 0) {
6560 neon_load_reg64(cpu_V0, rn);
6561 if (q) {
6562 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6563 }
ad69471c
PB
6564 } else if (imm == 8) {
6565 neon_load_reg64(cpu_V0, rn + 1);
6566 if (q) {
6567 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6568 }
ad69471c 6569 } else if (q) {
a7812ae4 6570 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6571 if (imm < 8) {
6572 neon_load_reg64(cpu_V0, rn);
a7812ae4 6573 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6574 } else {
6575 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6576 neon_load_reg64(tmp64, rm);
ad69471c
PB
6577 }
6578 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6579 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6580 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6581 if (imm < 8) {
6582 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6583 } else {
ad69471c
PB
6584 neon_load_reg64(cpu_V1, rm + 1);
6585 imm -= 8;
9ee6e8bb 6586 }
ad69471c 6587 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6588 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6589 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6590 tcg_temp_free_i64(tmp64);
ad69471c 6591 } else {
a7812ae4 6592 /* BUGFIX */
ad69471c 6593 neon_load_reg64(cpu_V0, rn);
a7812ae4 6594 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6595 neon_load_reg64(cpu_V1, rm);
a7812ae4 6596 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6597 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6598 }
6599 neon_store_reg64(cpu_V0, rd);
6600 if (q) {
6601 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6602 }
6603 } else if ((insn & (1 << 11)) == 0) {
6604 /* Two register misc. */
6605 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6606 size = (insn >> 18) & 3;
600b828c
PM
6607 /* UNDEF for unknown op values and bad op-size combinations */
6608 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6609 return 1;
6610 }
fe8fcf3d
PM
6611 if (neon_2rm_is_v8_op(op) &&
6612 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6613 return 1;
6614 }
fc2a9b37
PM
6615 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6616 q && ((rm | rd) & 1)) {
6617 return 1;
6618 }
9ee6e8bb 6619 switch (op) {
600b828c 6620 case NEON_2RM_VREV64:
9ee6e8bb 6621 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6622 tmp = neon_load_reg(rm, pass * 2);
6623 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6624 switch (size) {
dd8fbd78
FN
6625 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6626 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6627 case 2: /* no-op */ break;
6628 default: abort();
6629 }
dd8fbd78 6630 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6631 if (size == 2) {
dd8fbd78 6632 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6633 } else {
9ee6e8bb 6634 switch (size) {
dd8fbd78
FN
6635 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6636 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6637 default: abort();
6638 }
dd8fbd78 6639 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6640 }
6641 }
6642 break;
600b828c
PM
6643 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6644 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6645 for (pass = 0; pass < q + 1; pass++) {
6646 tmp = neon_load_reg(rm, pass * 2);
6647 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6648 tmp = neon_load_reg(rm, pass * 2 + 1);
6649 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6650 switch (size) {
6651 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6652 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6653 case 2: tcg_gen_add_i64(CPU_V001); break;
6654 default: abort();
6655 }
600b828c 6656 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6657 /* Accumulate. */
ad69471c
PB
6658 neon_load_reg64(cpu_V1, rd + pass);
6659 gen_neon_addl(size);
9ee6e8bb 6660 }
ad69471c 6661 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6662 }
6663 break;
600b828c 6664 case NEON_2RM_VTRN:
9ee6e8bb 6665 if (size == 2) {
a5a14945 6666 int n;
9ee6e8bb 6667 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6668 tmp = neon_load_reg(rm, n);
6669 tmp2 = neon_load_reg(rd, n + 1);
6670 neon_store_reg(rm, n, tmp2);
6671 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6672 }
6673 } else {
6674 goto elementwise;
6675 }
6676 break;
600b828c 6677 case NEON_2RM_VUZP:
02acedf9 6678 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6679 return 1;
9ee6e8bb
PB
6680 }
6681 break;
600b828c 6682 case NEON_2RM_VZIP:
d68a6f3a 6683 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6684 return 1;
9ee6e8bb
PB
6685 }
6686 break;
600b828c
PM
6687 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6688 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6689 if (rm & 1) {
6690 return 1;
6691 }
f764718d 6692 tmp2 = NULL;
9ee6e8bb 6693 for (pass = 0; pass < 2; pass++) {
ad69471c 6694 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6695 tmp = tcg_temp_new_i32();
600b828c
PM
6696 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6697 tmp, cpu_V0);
ad69471c
PB
6698 if (pass == 0) {
6699 tmp2 = tmp;
6700 } else {
6701 neon_store_reg(rd, 0, tmp2);
6702 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6703 }
9ee6e8bb
PB
6704 }
6705 break;
600b828c 6706 case NEON_2RM_VSHLL:
fc2a9b37 6707 if (q || (rd & 1)) {
9ee6e8bb 6708 return 1;
600b828c 6709 }
ad69471c
PB
6710 tmp = neon_load_reg(rm, 0);
6711 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6712 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6713 if (pass == 1)
6714 tmp = tmp2;
6715 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6716 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6717 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6718 }
6719 break;
600b828c 6720 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
6721 {
6722 TCGv_ptr fpst;
6723 TCGv_i32 ahp;
6724
602f6e42 6725 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
6726 q || (rm & 1)) {
6727 return 1;
6728 }
7d1b0095
PM
6729 tmp = tcg_temp_new_i32();
6730 tmp2 = tcg_temp_new_i32();
486624fc
AB
6731 fpst = get_fpstatus_ptr(true);
6732 ahp = get_ahp_flag();
60011498 6733 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
486624fc 6734 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498 6735 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
486624fc 6736 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
6737 tcg_gen_shli_i32(tmp2, tmp2, 16);
6738 tcg_gen_or_i32(tmp2, tmp2, tmp);
6739 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
486624fc 6740 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498
PB
6741 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6742 neon_store_reg(rd, 0, tmp2);
7d1b0095 6743 tmp2 = tcg_temp_new_i32();
486624fc 6744 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
6745 tcg_gen_shli_i32(tmp2, tmp2, 16);
6746 tcg_gen_or_i32(tmp2, tmp2, tmp);
6747 neon_store_reg(rd, 1, tmp2);
7d1b0095 6748 tcg_temp_free_i32(tmp);
486624fc
AB
6749 tcg_temp_free_i32(ahp);
6750 tcg_temp_free_ptr(fpst);
60011498 6751 break;
486624fc 6752 }
600b828c 6753 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
6754 {
6755 TCGv_ptr fpst;
6756 TCGv_i32 ahp;
602f6e42 6757 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
6758 q || (rd & 1)) {
6759 return 1;
6760 }
486624fc
AB
6761 fpst = get_fpstatus_ptr(true);
6762 ahp = get_ahp_flag();
7d1b0095 6763 tmp3 = tcg_temp_new_i32();
60011498
PB
6764 tmp = neon_load_reg(rm, 0);
6765 tmp2 = neon_load_reg(rm, 1);
6766 tcg_gen_ext16u_i32(tmp3, tmp);
486624fc 6767 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
6768 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6769 tcg_gen_shri_i32(tmp3, tmp, 16);
486624fc 6770 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 6771 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6772 tcg_temp_free_i32(tmp);
60011498 6773 tcg_gen_ext16u_i32(tmp3, tmp2);
486624fc 6774 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
6775 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6776 tcg_gen_shri_i32(tmp3, tmp2, 16);
486624fc 6777 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 6778 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6779 tcg_temp_free_i32(tmp2);
6780 tcg_temp_free_i32(tmp3);
486624fc
AB
6781 tcg_temp_free_i32(ahp);
6782 tcg_temp_free_ptr(fpst);
60011498 6783 break;
486624fc 6784 }
9d935509 6785 case NEON_2RM_AESE: case NEON_2RM_AESMC:
962fcbf2 6786 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
9d935509
AB
6787 return 1;
6788 }
1a66ac61
RH
6789 ptr1 = vfp_reg_ptr(true, rd);
6790 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
6791
6792 /* Bit 6 is the lowest opcode bit; it distinguishes between
6793 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6794 */
6795 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6796
6797 if (op == NEON_2RM_AESE) {
1a66ac61 6798 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 6799 } else {
1a66ac61 6800 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 6801 }
1a66ac61
RH
6802 tcg_temp_free_ptr(ptr1);
6803 tcg_temp_free_ptr(ptr2);
9d935509
AB
6804 tcg_temp_free_i32(tmp3);
6805 break;
f1ecb913 6806 case NEON_2RM_SHA1H:
962fcbf2 6807 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
f1ecb913
AB
6808 return 1;
6809 }
1a66ac61
RH
6810 ptr1 = vfp_reg_ptr(true, rd);
6811 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 6812
1a66ac61 6813 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 6814
1a66ac61
RH
6815 tcg_temp_free_ptr(ptr1);
6816 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
6817 break;
6818 case NEON_2RM_SHA1SU1:
6819 if ((rm | rd) & 1) {
6820 return 1;
6821 }
6822 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6823 if (q) {
962fcbf2 6824 if (!dc_isar_feature(aa32_sha2, s)) {
f1ecb913
AB
6825 return 1;
6826 }
962fcbf2 6827 } else if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
6828 return 1;
6829 }
1a66ac61
RH
6830 ptr1 = vfp_reg_ptr(true, rd);
6831 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 6832 if (q) {
1a66ac61 6833 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 6834 } else {
1a66ac61 6835 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 6836 }
1a66ac61
RH
6837 tcg_temp_free_ptr(ptr1);
6838 tcg_temp_free_ptr(ptr2);
f1ecb913 6839 break;
4bf940be
RH
6840
6841 case NEON_2RM_VMVN:
6842 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
6843 break;
6844 case NEON_2RM_VNEG:
6845 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
6846 break;
4e027a71
RH
6847 case NEON_2RM_VABS:
6848 tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
6849 break;
4bf940be 6850
9ee6e8bb
PB
6851 default:
6852 elementwise:
6853 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6854 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6855 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6856 neon_reg_offset(rm, pass));
f764718d 6857 tmp = NULL;
9ee6e8bb 6858 } else {
dd8fbd78 6859 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6860 }
6861 switch (op) {
600b828c 6862 case NEON_2RM_VREV32:
9ee6e8bb 6863 switch (size) {
dd8fbd78
FN
6864 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6865 case 1: gen_swap_half(tmp); break;
600b828c 6866 default: abort();
9ee6e8bb
PB
6867 }
6868 break;
600b828c 6869 case NEON_2RM_VREV16:
dd8fbd78 6870 gen_rev16(tmp);
9ee6e8bb 6871 break;
600b828c 6872 case NEON_2RM_VCLS:
9ee6e8bb 6873 switch (size) {
dd8fbd78
FN
6874 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6875 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6876 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6877 default: abort();
9ee6e8bb
PB
6878 }
6879 break;
600b828c 6880 case NEON_2RM_VCLZ:
9ee6e8bb 6881 switch (size) {
dd8fbd78
FN
6882 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6883 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 6884 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 6885 default: abort();
9ee6e8bb
PB
6886 }
6887 break;
600b828c 6888 case NEON_2RM_VCNT:
dd8fbd78 6889 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6890 break;
600b828c 6891 case NEON_2RM_VQABS:
9ee6e8bb 6892 switch (size) {
02da0b2d
PM
6893 case 0:
6894 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6895 break;
6896 case 1:
6897 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6898 break;
6899 case 2:
6900 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6901 break;
600b828c 6902 default: abort();
9ee6e8bb
PB
6903 }
6904 break;
600b828c 6905 case NEON_2RM_VQNEG:
9ee6e8bb 6906 switch (size) {
02da0b2d
PM
6907 case 0:
6908 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6909 break;
6910 case 1:
6911 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6912 break;
6913 case 2:
6914 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6915 break;
600b828c 6916 default: abort();
9ee6e8bb
PB
6917 }
6918 break;
600b828c 6919 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6920 tmp2 = tcg_const_i32(0);
9ee6e8bb 6921 switch(size) {
dd8fbd78
FN
6922 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6923 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6924 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6925 default: abort();
9ee6e8bb 6926 }
39d5492a 6927 tcg_temp_free_i32(tmp2);
600b828c 6928 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6929 tcg_gen_not_i32(tmp, tmp);
600b828c 6930 }
9ee6e8bb 6931 break;
600b828c 6932 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6933 tmp2 = tcg_const_i32(0);
9ee6e8bb 6934 switch(size) {
dd8fbd78
FN
6935 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6936 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6937 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6938 default: abort();
9ee6e8bb 6939 }
39d5492a 6940 tcg_temp_free_i32(tmp2);
600b828c 6941 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6942 tcg_gen_not_i32(tmp, tmp);
600b828c 6943 }
9ee6e8bb 6944 break;
600b828c 6945 case NEON_2RM_VCEQ0:
dd8fbd78 6946 tmp2 = tcg_const_i32(0);
9ee6e8bb 6947 switch(size) {
dd8fbd78
FN
6948 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6949 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6950 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6951 default: abort();
9ee6e8bb 6952 }
39d5492a 6953 tcg_temp_free_i32(tmp2);
9ee6e8bb 6954 break;
600b828c 6955 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6956 {
6957 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6958 tmp2 = tcg_const_i32(0);
aa47cfdd 6959 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6960 tcg_temp_free_i32(tmp2);
aa47cfdd 6961 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6962 break;
aa47cfdd 6963 }
600b828c 6964 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6965 {
6966 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6967 tmp2 = tcg_const_i32(0);
aa47cfdd 6968 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6969 tcg_temp_free_i32(tmp2);
aa47cfdd 6970 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6971 break;
aa47cfdd 6972 }
600b828c 6973 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6974 {
6975 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6976 tmp2 = tcg_const_i32(0);
aa47cfdd 6977 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6978 tcg_temp_free_i32(tmp2);
aa47cfdd 6979 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6980 break;
aa47cfdd 6981 }
600b828c 6982 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6983 {
6984 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6985 tmp2 = tcg_const_i32(0);
aa47cfdd 6986 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6987 tcg_temp_free_i32(tmp2);
aa47cfdd 6988 tcg_temp_free_ptr(fpstatus);
0e326109 6989 break;
aa47cfdd 6990 }
600b828c 6991 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6992 {
6993 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6994 tmp2 = tcg_const_i32(0);
aa47cfdd 6995 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6996 tcg_temp_free_i32(tmp2);
aa47cfdd 6997 tcg_temp_free_ptr(fpstatus);
0e326109 6998 break;
aa47cfdd 6999 }
600b828c 7000 case NEON_2RM_VABS_F:
4373f3ce 7001 gen_vfp_abs(0);
9ee6e8bb 7002 break;
600b828c 7003 case NEON_2RM_VNEG_F:
4373f3ce 7004 gen_vfp_neg(0);
9ee6e8bb 7005 break;
600b828c 7006 case NEON_2RM_VSWP:
dd8fbd78
FN
7007 tmp2 = neon_load_reg(rd, pass);
7008 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7009 break;
600b828c 7010 case NEON_2RM_VTRN:
dd8fbd78 7011 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7012 switch (size) {
dd8fbd78
FN
7013 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7014 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7015 default: abort();
9ee6e8bb 7016 }
dd8fbd78 7017 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7018 break;
34f7b0a2
WN
7019 case NEON_2RM_VRINTN:
7020 case NEON_2RM_VRINTA:
7021 case NEON_2RM_VRINTM:
7022 case NEON_2RM_VRINTP:
7023 case NEON_2RM_VRINTZ:
7024 {
7025 TCGv_i32 tcg_rmode;
7026 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7027 int rmode;
7028
7029 if (op == NEON_2RM_VRINTZ) {
7030 rmode = FPROUNDING_ZERO;
7031 } else {
7032 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7033 }
7034
7035 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7036 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7037 cpu_env);
7038 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7039 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7040 cpu_env);
7041 tcg_temp_free_ptr(fpstatus);
7042 tcg_temp_free_i32(tcg_rmode);
7043 break;
7044 }
2ce70625
WN
7045 case NEON_2RM_VRINTX:
7046 {
7047 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7048 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7049 tcg_temp_free_ptr(fpstatus);
7050 break;
7051 }
901ad525
WN
7052 case NEON_2RM_VCVTAU:
7053 case NEON_2RM_VCVTAS:
7054 case NEON_2RM_VCVTNU:
7055 case NEON_2RM_VCVTNS:
7056 case NEON_2RM_VCVTPU:
7057 case NEON_2RM_VCVTPS:
7058 case NEON_2RM_VCVTMU:
7059 case NEON_2RM_VCVTMS:
7060 {
7061 bool is_signed = !extract32(insn, 7, 1);
7062 TCGv_ptr fpst = get_fpstatus_ptr(1);
7063 TCGv_i32 tcg_rmode, tcg_shift;
7064 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7065
7066 tcg_shift = tcg_const_i32(0);
7067 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7068 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7069 cpu_env);
7070
7071 if (is_signed) {
7072 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7073 tcg_shift, fpst);
7074 } else {
7075 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7076 tcg_shift, fpst);
7077 }
7078
7079 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7080 cpu_env);
7081 tcg_temp_free_i32(tcg_rmode);
7082 tcg_temp_free_i32(tcg_shift);
7083 tcg_temp_free_ptr(fpst);
7084 break;
7085 }
600b828c 7086 case NEON_2RM_VRECPE:
b6d4443a
AB
7087 {
7088 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7089 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7090 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7091 break;
b6d4443a 7092 }
600b828c 7093 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7094 {
7095 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7096 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7097 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7098 break;
c2fb418e 7099 }
600b828c 7100 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7101 {
7102 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7103 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7104 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7105 break;
b6d4443a 7106 }
600b828c 7107 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7108 {
7109 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7110 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7111 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7112 break;
c2fb418e 7113 }
600b828c 7114 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7115 gen_vfp_sito(0, 1);
9ee6e8bb 7116 break;
600b828c 7117 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7118 gen_vfp_uito(0, 1);
9ee6e8bb 7119 break;
600b828c 7120 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7121 gen_vfp_tosiz(0, 1);
9ee6e8bb 7122 break;
600b828c 7123 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7124 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7125 break;
7126 default:
600b828c
PM
7127 /* Reserved op values were caught by the
7128 * neon_2rm_sizes[] check earlier.
7129 */
7130 abort();
9ee6e8bb 7131 }
600b828c 7132 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7133 tcg_gen_st_f32(cpu_F0s, cpu_env,
7134 neon_reg_offset(rd, pass));
9ee6e8bb 7135 } else {
dd8fbd78 7136 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7137 }
7138 }
7139 break;
7140 }
7141 } else if ((insn & (1 << 10)) == 0) {
7142 /* VTBL, VTBX. */
56907d77
PM
7143 int n = ((insn >> 8) & 3) + 1;
7144 if ((rn + n) > 32) {
7145 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7146 * helper function running off the end of the register file.
7147 */
7148 return 1;
7149 }
7150 n <<= 3;
9ee6e8bb 7151 if (insn & (1 << 6)) {
8f8e3aa4 7152 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7153 } else {
7d1b0095 7154 tmp = tcg_temp_new_i32();
8f8e3aa4 7155 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7156 }
8f8e3aa4 7157 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 7158 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 7159 tmp5 = tcg_const_i32(n);
e7c06c4e 7160 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 7161 tcg_temp_free_i32(tmp);
9ee6e8bb 7162 if (insn & (1 << 6)) {
8f8e3aa4 7163 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7164 } else {
7d1b0095 7165 tmp = tcg_temp_new_i32();
8f8e3aa4 7166 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7167 }
8f8e3aa4 7168 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 7169 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 7170 tcg_temp_free_i32(tmp5);
e7c06c4e 7171 tcg_temp_free_ptr(ptr1);
8f8e3aa4 7172 neon_store_reg(rd, 0, tmp2);
3018f259 7173 neon_store_reg(rd, 1, tmp3);
7d1b0095 7174 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7175 } else if ((insn & 0x380) == 0) {
7176 /* VDUP */
32f91fb7
RH
7177 int element;
7178 TCGMemOp size;
7179
133da6aa
JR
7180 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7181 return 1;
7182 }
9ee6e8bb 7183 if (insn & (1 << 16)) {
32f91fb7
RH
7184 size = MO_8;
7185 element = (insn >> 17) & 7;
9ee6e8bb 7186 } else if (insn & (1 << 17)) {
32f91fb7
RH
7187 size = MO_16;
7188 element = (insn >> 18) & 3;
7189 } else {
7190 size = MO_32;
7191 element = (insn >> 19) & 1;
9ee6e8bb 7192 }
32f91fb7
RH
7193 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
7194 neon_element_offset(rm, element, size),
7195 q ? 16 : 8, q ? 16 : 8);
9ee6e8bb
PB
7196 } else {
7197 return 1;
7198 }
7199 }
7200 }
7201 return 0;
7202}
7203
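The VDUP (scalar) leg just above decodes an imm4 field whose lowest set bit selects the element size, with the remaining bits giving the element index. A minimal standalone sketch of that decode, assuming nothing from QEMU (the helper and test encodings below are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the imm4 decode in the VDUP (scalar) path above:
 * imm4 = xxx1 -> 8-bit elements, index in insn[19:17]
 * imm4 = xx10 -> 16-bit elements, index in insn[19:18]
 * imm4 = x100 -> 32-bit elements, index in insn[19]
 */
static void decode_vdup_scalar(uint32_t insn)
{
    int size_bits, element;

    if (insn & (1u << 16)) {
        size_bits = 8;
        element = (insn >> 17) & 7;
    } else if (insn & (1u << 17)) {
        size_bits = 16;
        element = (insn >> 18) & 3;
    } else {
        size_bits = 32;
        element = (insn >> 19) & 1;
    }
    printf("VDUP.%d element %d\n", size_bits, element);
}

int main(void)
{
    decode_vdup_scalar(1u << 16 | 5u << 17);  /* 8-bit, element 5 */
    decode_vdup_scalar(1u << 17 | 2u << 18);  /* 16-bit, element 2 */
    return 0;
}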
8b7209fa
RH
7204/* Advanced SIMD three registers of the same length extension.
7205 * 31 25 23 22 20 16 12 11 10 9 8 3 0
7206 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7207 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7208 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7209 */
7210static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
7211{
26c470a7
RH
7212 gen_helper_gvec_3 *fn_gvec = NULL;
7213 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
7214 int rd, rn, rm, opr_sz;
7215 int data = 0;
87732318
RH
7216 int off_rn, off_rm;
7217 bool is_long = false, q = extract32(insn, 6, 1);
7218 bool ptr_is_env = false;
8b7209fa
RH
7219
7220 if ((insn & 0xfe200f10) == 0xfc200800) {
7221 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
7222 int size = extract32(insn, 20, 1);
7223 data = extract32(insn, 23, 2); /* rot */
962fcbf2 7224 if (!dc_isar_feature(aa32_vcma, s)
5763190f 7225 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
7226 return 1;
7227 }
7228 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
7229 } else if ((insn & 0xfea00f10) == 0xfc800800) {
7230 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
7231 int size = extract32(insn, 20, 1);
7232 data = extract32(insn, 24, 1); /* rot */
962fcbf2 7233 if (!dc_isar_feature(aa32_vcma, s)
5763190f 7234 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
7235 return 1;
7236 }
7237 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
7238 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
7239 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
7240 bool u = extract32(insn, 4, 1);
962fcbf2 7241 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
7242 return 1;
7243 }
7244 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
87732318
RH
7245 } else if ((insn & 0xff300f10) == 0xfc200810) {
7246 /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
7247 int is_s = extract32(insn, 23, 1);
7248 if (!dc_isar_feature(aa32_fhm, s)) {
7249 return 1;
7250 }
7251 is_long = true;
7252 data = is_s; /* is_2 == 0 */
7253 fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
7254 ptr_is_env = true;
8b7209fa
RH
7255 } else {
7256 return 1;
7257 }
7258
87732318
RH
7259 VFP_DREG_D(rd, insn);
7260 if (rd & q) {
7261 return 1;
7262 }
7263 if (q || !is_long) {
7264 VFP_DREG_N(rn, insn);
7265 VFP_DREG_M(rm, insn);
7266 if ((rn | rm) & q & !is_long) {
7267 return 1;
7268 }
7269 off_rn = vfp_reg_offset(1, rn);
7270 off_rm = vfp_reg_offset(1, rm);
7271 } else {
7272 rn = VFP_SREG_N(insn);
7273 rm = VFP_SREG_M(insn);
7274 off_rn = vfp_reg_offset(0, rn);
7275 off_rm = vfp_reg_offset(0, rm);
7276 }
7277
8b7209fa
RH
7278 if (s->fp_excp_el) {
7279 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 7280 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8b7209fa
RH
7281 return 0;
7282 }
7283 if (!s->vfp_enabled) {
7284 return 1;
7285 }
7286
7287 opr_sz = (1 + q) * 8;
26c470a7 7288 if (fn_gvec_ptr) {
87732318
RH
7289 TCGv_ptr ptr;
7290 if (ptr_is_env) {
7291 ptr = cpu_env;
7292 } else {
7293 ptr = get_fpstatus_ptr(1);
7294 }
7295 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 7296 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
7297 if (!ptr_is_env) {
7298 tcg_temp_free_ptr(ptr);
7299 }
26c470a7 7300 } else {
87732318 7301 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
7302 opr_sz, opr_sz, data, fn_gvec);
7303 }
8b7209fa
RH
7304 return 0;
7305}
7306
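disas_neon_insn_3same_ext() above selects a handler by masking off each encoding's don't-care bits and comparing the rest against a fixed pattern, in order. A table-driven sketch of the same idea (the struct and classify() are illustrative, not QEMU code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pattern {
    uint32_t mask, value;
    const char *name;
};

/* The same mask/value pairs tested in order by the function above. */
static const struct pattern neon_3same_ext[] = {
    { 0xfe200f10, 0xfc200800, "VCMLA" },
    { 0xfea00f10, 0xfc800800, "VCADD" },
    { 0xfeb00f00, 0xfc200d00, "V[US]DOT" },
    { 0xff300f10, 0xfc200810, "VFM[AS]L" },
};

static const char *classify(uint32_t insn)
{
    for (size_t i = 0; i < sizeof(neon_3same_ext) / sizeof(neon_3same_ext[0]); i++) {
        if ((insn & neon_3same_ext[i].mask) == neon_3same_ext[i].value) {
            return neon_3same_ext[i].name;
        }
    }
    return "UNDEF";
}

int main(void)
{
    printf("%s\n", classify(0xfc200800));  /* hits the VCMLA pattern */
    return 0;
}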
638808ff
RH
7307/* Advanced SIMD two registers and a scalar extension.
7308 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7309 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7310 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7311 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7312 *
7313 */
7314
7315static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7316{
26c470a7
RH
7317 gen_helper_gvec_3 *fn_gvec = NULL;
7318 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
2cc99919 7319 int rd, rn, rm, opr_sz, data;
87732318
RH
7320 int off_rn, off_rm;
7321 bool is_long = false, q = extract32(insn, 6, 1);
7322 bool ptr_is_env = false;
638808ff
RH
7323
7324 if ((insn & 0xff000f10) == 0xfe000800) {
7325 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
2cc99919
RH
7326 int rot = extract32(insn, 20, 2);
7327 int size = extract32(insn, 23, 1);
7328 int index;
7329
962fcbf2 7330 if (!dc_isar_feature(aa32_vcma, s)) {
638808ff
RH
7331 return 1;
7332 }
2cc99919 7333 if (size == 0) {
5763190f 7334 if (!dc_isar_feature(aa32_fp16_arith, s)) {
2cc99919
RH
7335 return 1;
7336 }
7337 /* For fp16, rm is just Vm, and index is M. */
7338 rm = extract32(insn, 0, 4);
7339 index = extract32(insn, 5, 1);
7340 } else {
7341 /* For fp32, rm is the usual M:Vm, and index is 0. */
7342 VFP_DREG_M(rm, insn);
7343 index = 0;
7344 }
7345 data = (index << 2) | rot;
7346 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
7347 : gen_helper_gvec_fcmlah_idx);
26c470a7
RH
7348 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
7349 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
7350 int u = extract32(insn, 4, 1);
87732318 7351
962fcbf2 7352 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
7353 return 1;
7354 }
7355 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
7356 /* rm is just Vm, and index is M. */
7357 data = extract32(insn, 5, 1); /* index */
7358 rm = extract32(insn, 0, 4);
87732318
RH
7359 } else if ((insn & 0xffa00f10) == 0xfe000810) {
7360 /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
7361 int is_s = extract32(insn, 20, 1);
7362 int vm20 = extract32(insn, 0, 3);
7363 int vm3 = extract32(insn, 3, 1);
7364 int m = extract32(insn, 5, 1);
7365 int index;
7366
7367 if (!dc_isar_feature(aa32_fhm, s)) {
7368 return 1;
7369 }
7370 if (q) {
7371 rm = vm20;
7372 index = m * 2 + vm3;
7373 } else {
7374 rm = vm20 * 2 + m;
7375 index = vm3;
7376 }
7377 is_long = true;
7378 data = (index << 2) | is_s; /* is_2 == 0 */
7379 fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
7380 ptr_is_env = true;
638808ff
RH
7381 } else {
7382 return 1;
7383 }
7384
87732318
RH
7385 VFP_DREG_D(rd, insn);
7386 if (rd & q) {
7387 return 1;
7388 }
7389 if (q || !is_long) {
7390 VFP_DREG_N(rn, insn);
7391 if (rn & q & !is_long) {
7392 return 1;
7393 }
7394 off_rn = vfp_reg_offset(1, rn);
7395 off_rm = vfp_reg_offset(1, rm);
7396 } else {
7397 rn = VFP_SREG_N(insn);
7398 off_rn = vfp_reg_offset(0, rn);
7399 off_rm = vfp_reg_offset(0, rm);
7400 }
638808ff
RH
7401 if (s->fp_excp_el) {
7402 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 7403 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
638808ff
RH
7404 return 0;
7405 }
7406 if (!s->vfp_enabled) {
7407 return 1;
7408 }
7409
7410 opr_sz = (1 + q) * 8;
26c470a7 7411 if (fn_gvec_ptr) {
87732318
RH
7412 TCGv_ptr ptr;
7413 if (ptr_is_env) {
7414 ptr = cpu_env;
7415 } else {
7416 ptr = get_fpstatus_ptr(1);
7417 }
7418 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 7419 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
7420 if (!ptr_is_env) {
7421 tcg_temp_free_ptr(ptr);
7422 }
26c470a7 7423 } else {
87732318 7424 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
7425 opr_sz, opr_sz, data, fn_gvec);
7426 }
638808ff
RH
7427 return 0;
7428}
7429
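The VFM[AS]L (by scalar) arm above assembles rm and the element index differently for Q=1 and Q=0, because the long form reads 16-bit scalars out of single-precision-style register numbering. A standalone sketch of just that field arithmetic (illustrative only):

#include <stdint.h>
#include <stdio.h>

static void decode_vfmal_scalar(uint32_t insn)
{
    int q    = (insn >> 6) & 1;
    int vm20 = insn & 7;         /* insn[2:0] */
    int vm3  = (insn >> 3) & 1;  /* insn[3]   */
    int m    = (insn >> 5) & 1;  /* insn[5]   */
    int rm, index;

    if (q) {
        rm = vm20;               /* Dm; element index comes from M:vm3 */
        index = m * 2 + vm3;
    } else {
        rm = vm20 * 2 + m;       /* Sm-style numbering folded into rm */
        index = vm3;
    }
    printf("Q=%d rm=%d index=%d\n", q, rm, index);
}

int main(void)
{
    decode_vfmal_scalar(5);              /* Q=0: rm = vm20*2 + m */
    decode_vfmal_scalar((1u << 6) | 5);  /* Q=1: rm = vm20 */
    return 0;
}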
7dcc1f89 7430static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 7431{
4b6a83fb
PM
7432 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7433 const ARMCPRegInfo *ri;
9ee6e8bb
PB
7434
7435 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
7436
7437 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 7438 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
7439 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7440 return 1;
7441 }
d614a513 7442 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 7443 return disas_iwmmxt_insn(s, insn);
d614a513 7444 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 7445 return disas_dsp_insn(s, insn);
c0f4af17
PM
7446 }
7447 return 1;
4b6a83fb
PM
7448 }
7449
7450 /* Otherwise treat as a generic register access */
7451 is64 = (insn & (1 << 25)) == 0;
7452 if (!is64 && ((insn & (1 << 4)) == 0)) {
7453 /* cdp */
7454 return 1;
7455 }
7456
7457 crm = insn & 0xf;
7458 if (is64) {
7459 crn = 0;
7460 opc1 = (insn >> 4) & 0xf;
7461 opc2 = 0;
7462 rt2 = (insn >> 16) & 0xf;
7463 } else {
7464 crn = (insn >> 16) & 0xf;
7465 opc1 = (insn >> 21) & 7;
7466 opc2 = (insn >> 5) & 7;
7467 rt2 = 0;
7468 }
7469 isread = (insn >> 20) & 1;
7470 rt = (insn >> 12) & 0xf;
7471
60322b39 7472 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 7473 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
7474 if (ri) {
7475 /* Check access permissions */
dcbff19b 7476 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
7477 return 1;
7478 }
7479
c0f4af17 7480 if (ri->accessfn ||
d614a513 7481 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
7482 /* Emit code to perform further access permissions checks at
7483 * runtime; this may result in an exception.
c0f4af17
PM
7484 * Note that on XScale all cp0..cp13 registers do an access check
7485 * call in order to handle c15_cpar.
f59df3f2
PM
7486 */
7487 TCGv_ptr tmpptr;
3f208fd7 7488 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
7489 uint32_t syndrome;
7490
7491 /* Note that since we are an implementation which takes an
7492 * exception on a trapped conditional instruction only if the
7493 * instruction passes its condition code check, we can take
7494 * advantage of the clause in the ARM ARM that allows us to set
7495 * the COND field in the instruction to 0xE in all cases.
7496 * We could fish the actual condition out of the insn (ARM)
7497 * or the condexec bits (Thumb) but it isn't necessary.
7498 */
7499 switch (cpnum) {
7500 case 14:
7501 if (is64) {
7502 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7503 isread, false);
8bcbf37c
PM
7504 } else {
7505 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7506 rt, isread, false);
8bcbf37c
PM
7507 }
7508 break;
7509 case 15:
7510 if (is64) {
7511 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7512 isread, false);
8bcbf37c
PM
7513 } else {
7514 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7515 rt, isread, false);
8bcbf37c
PM
7516 }
7517 break;
7518 default:
7519 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7520 * so this can only happen if this is an ARMv7 or earlier CPU,
7521 * in which case the syndrome information won't actually be
7522 * guest visible.
7523 */
d614a513 7524 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
7525 syndrome = syn_uncategorized();
7526 break;
7527 }
7528
43bfa4a1 7529 gen_set_condexec(s);
3977ee5d 7530 gen_set_pc_im(s, s->pc - 4);
f59df3f2 7531 tmpptr = tcg_const_ptr(ri);
8bcbf37c 7532 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
7533 tcg_isread = tcg_const_i32(isread);
7534 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7535 tcg_isread);
f59df3f2 7536 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7537 tcg_temp_free_i32(tcg_syn);
3f208fd7 7538 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
7539 }
7540
4b6a83fb
PM
7541 /* Handle special cases first */
7542 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7543 case ARM_CP_NOP:
7544 return 0;
7545 case ARM_CP_WFI:
7546 if (isread) {
7547 return 1;
7548 }
eaed129d 7549 gen_set_pc_im(s, s->pc);
dcba3a8d 7550 s->base.is_jmp = DISAS_WFI;
2bee5105 7551 return 0;
4b6a83fb
PM
7552 default:
7553 break;
7554 }
7555
c5a49c63 7556 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7557 gen_io_start();
7558 }
7559
4b6a83fb
PM
7560 if (isread) {
7561 /* Read */
7562 if (is64) {
7563 TCGv_i64 tmp64;
7564 TCGv_i32 tmp;
7565 if (ri->type & ARM_CP_CONST) {
7566 tmp64 = tcg_const_i64(ri->resetvalue);
7567 } else if (ri->readfn) {
7568 TCGv_ptr tmpptr;
4b6a83fb
PM
7569 tmp64 = tcg_temp_new_i64();
7570 tmpptr = tcg_const_ptr(ri);
7571 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7572 tcg_temp_free_ptr(tmpptr);
7573 } else {
7574 tmp64 = tcg_temp_new_i64();
7575 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7576 }
7577 tmp = tcg_temp_new_i32();
ecc7b3aa 7578 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
7579 store_reg(s, rt, tmp);
7580 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7581 tmp = tcg_temp_new_i32();
ecc7b3aa 7582 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 7583 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7584 store_reg(s, rt2, tmp);
7585 } else {
39d5492a 7586 TCGv_i32 tmp;
4b6a83fb
PM
7587 if (ri->type & ARM_CP_CONST) {
7588 tmp = tcg_const_i32(ri->resetvalue);
7589 } else if (ri->readfn) {
7590 TCGv_ptr tmpptr;
4b6a83fb
PM
7591 tmp = tcg_temp_new_i32();
7592 tmpptr = tcg_const_ptr(ri);
7593 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7594 tcg_temp_free_ptr(tmpptr);
7595 } else {
7596 tmp = load_cpu_offset(ri->fieldoffset);
7597 }
7598 if (rt == 15) {
7599 /* Destination register of r15 for 32 bit loads sets
7600 * the condition codes from the high 4 bits of the value
7601 */
7602 gen_set_nzcv(tmp);
7603 tcg_temp_free_i32(tmp);
7604 } else {
7605 store_reg(s, rt, tmp);
7606 }
7607 }
7608 } else {
7609 /* Write */
7610 if (ri->type & ARM_CP_CONST) {
7611 /* If not forbidden by access permissions, treat as WI */
7612 return 0;
7613 }
7614
7615 if (is64) {
39d5492a 7616 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7617 TCGv_i64 tmp64 = tcg_temp_new_i64();
7618 tmplo = load_reg(s, rt);
7619 tmphi = load_reg(s, rt2);
7620 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7621 tcg_temp_free_i32(tmplo);
7622 tcg_temp_free_i32(tmphi);
7623 if (ri->writefn) {
7624 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7625 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7626 tcg_temp_free_ptr(tmpptr);
7627 } else {
7628 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7629 }
7630 tcg_temp_free_i64(tmp64);
7631 } else {
7632 if (ri->writefn) {
39d5492a 7633 TCGv_i32 tmp;
4b6a83fb 7634 TCGv_ptr tmpptr;
4b6a83fb
PM
7635 tmp = load_reg(s, rt);
7636 tmpptr = tcg_const_ptr(ri);
7637 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7638 tcg_temp_free_ptr(tmpptr);
7639 tcg_temp_free_i32(tmp);
7640 } else {
39d5492a 7641 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7642 store_cpu_offset(tmp, ri->fieldoffset);
7643 }
7644 }
2452731c
PM
7645 }
7646
c5a49c63 7647 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7648 /* I/O operations must end the TB here (whether read or write) */
7649 gen_io_end();
7650 gen_lookup_tb(s);
7651 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7652 /* We default to ending the TB on a coprocessor register write,
7653 * but allow this to be suppressed by the register definition
7654 * (usually only necessary to work around guest bugs).
7655 */
2452731c 7656 gen_lookup_tb(s);
4b6a83fb 7657 }
2452731c 7658
4b6a83fb
PM
7659 return 0;
7660 }
7661
626187d8
PM
7662 /* Unknown register; this might be a guest error or a QEMU
7663 * unimplemented feature.
7664 */
7665 if (is64) {
7666 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7667 "64 bit system register cp:%d opc1: %d crm:%d "
7668 "(%s)\n",
7669 isread ? "read" : "write", cpnum, opc1, crm,
7670 s->ns ? "non-secure" : "secure");
626187d8
PM
7671 } else {
7672 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7673 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7674 "(%s)\n",
7675 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7676 s->ns ? "non-secure" : "secure");
626187d8
PM
7677 }
7678
4a9a539f 7679 return 1;
9ee6e8bb
PB
7680}
7681
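As a cross-check of the field extraction at the top of disas_coproc_insn(), here is a standalone sketch decoding one concrete encoding; the example instruction is the classic mrc p15, 0, r0, c1, c0, 0 (SCTLR read). The sketch is illustrative, not QEMU code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t insn = 0xee110f10;  /* mrc p15, 0, r0, c1, c0, 0 */
    int cpnum = (insn >> 8) & 0xf;
    int is64 = (insn & (1 << 25)) == 0;        /* MRRC/MCRR form */
    int crm = insn & 0xf;
    int crn  = is64 ? 0 : (insn >> 16) & 0xf;
    int opc1 = is64 ? (insn >> 4) & 0xf : (insn >> 21) & 7;
    int opc2 = is64 ? 0 : (insn >> 5) & 7;
    int rt2  = is64 ? (insn >> 16) & 0xf : 0;  /* 64-bit form only */
    int isread = (insn >> 20) & 1;
    int rt = (insn >> 12) & 0xf;

    printf("cp%d is64=%d crn=%d crm=%d opc1=%d opc2=%d rt=%d rt2=%d isread=%d\n",
           cpnum, is64, crn, crm, opc1, opc2, rt, rt2, isread);
    return 0;
}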
5e3f878a
PB
7682
7683/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7684static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7685{
39d5492a 7686 TCGv_i32 tmp;
7d1b0095 7687 tmp = tcg_temp_new_i32();
ecc7b3aa 7688 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7689 store_reg(s, rlow, tmp);
7d1b0095 7690 tmp = tcg_temp_new_i32();
5e3f878a 7691 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7692 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7693 store_reg(s, rhigh, tmp);
7694}
7695
7696/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7697static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7698{
a7812ae4 7699 TCGv_i64 tmp;
39d5492a 7700 TCGv_i32 tmp2;
5e3f878a 7701
36aa55dc 7702 /* Load value and extend to 64 bits. */
a7812ae4 7703 tmp = tcg_temp_new_i64();
5e3f878a
PB
7704 tmp2 = load_reg(s, rlow);
7705 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7706 tcg_temp_free_i32(tmp2);
5e3f878a 7707 tcg_gen_add_i64(val, val, tmp);
b75263d6 7708 tcg_temp_free_i64(tmp);
5e3f878a
PB
7709}
7710
7711/* load and add a 64-bit value from a register pair. */
a7812ae4 7712static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7713{
a7812ae4 7714 TCGv_i64 tmp;
39d5492a
PM
7715 TCGv_i32 tmpl;
7716 TCGv_i32 tmph;
5e3f878a
PB
7717
7718 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7719 tmpl = load_reg(s, rlow);
7720 tmph = load_reg(s, rhigh);
a7812ae4 7721 tmp = tcg_temp_new_i64();
36aa55dc 7722 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7723 tcg_temp_free_i32(tmpl);
7724 tcg_temp_free_i32(tmph);
5e3f878a 7725 tcg_gen_add_i64(val, val, tmp);
b75263d6 7726 tcg_temp_free_i64(tmp);
5e3f878a
PB
7727}
7728
c9f10124 7729/* Set N and Z flags from hi|lo. */
39d5492a 7730static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7731{
c9f10124
RH
7732 tcg_gen_mov_i32(cpu_NF, hi);
7733 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7734}
7735
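gen_logicq_cc() relies on QEMU's lazy flag representation: cpu_NF carries the N flag in its bit 31, and cpu_ZF is zero exactly when the Z flag is set. A sketch of the same computation for a 64-bit result (plain C, not QEMU code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t lo = 0x00000000, hi = 0x80000000;
    uint32_t NF = hi;       /* N flag lives in bit 31 of NF */
    uint32_t ZF = lo | hi;  /* Z flag is set iff ZF == 0 */

    printf("N=%d Z=%d\n", (int)(NF >> 31), ZF == 0);
    return 0;
}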
426f5abc
PB
7736/* Load/Store exclusive instructions are implemented by remembering
7737 the value/address loaded, and seeing if these are the same
354161b3 7738 when the store is performed. This should be sufficient to implement
426f5abc 7739 the architecturally mandated semantics, and avoids having to monitor
354161b3
EC
7740 regular stores. The compare vs the remembered value is done during
7741 the cmpxchg operation, but we must compare the addresses manually. */
426f5abc 7742static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 7743 TCGv_i32 addr, int size)
426f5abc 7744{
94ee24e7 7745 TCGv_i32 tmp = tcg_temp_new_i32();
354161b3 7746 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 7747
50225ad0
PM
7748 s->is_ldex = true;
7749
426f5abc 7750 if (size == 3) {
39d5492a 7751 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 7752 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 7753
3448d47b
PM
7754 /* For AArch32, architecturally the 32-bit word at the lowest
7755 * address is always Rt and the one at addr+4 is Rt2, even if
7756 * the CPU is big-endian. That means we don't want to do a
7757 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
7758 * for an architecturally 64-bit access, but instead do a
7759 * 64-bit access using MO_BE if appropriate and then split
7760 * the two halves.
7761 * This only makes a difference for BE32 user-mode, where
7762 * frob64() must not flip the two halves of the 64-bit data
7763 * but this code must treat BE32 user-mode like BE32 system.
7764 */
7765 TCGv taddr = gen_aa32_addr(s, addr, opc);
7766
7767 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
7768 tcg_temp_free(taddr);
354161b3 7769 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3448d47b
PM
7770 if (s->be_data == MO_BE) {
7771 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
7772 } else {
7773 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
7774 }
354161b3
EC
7775 tcg_temp_free_i64(t64);
7776
7777 store_reg(s, rt2, tmp2);
03d05e2d 7778 } else {
354161b3 7779 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 7780 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 7781 }
03d05e2d
PM
7782
7783 store_reg(s, rt, tmp);
7784 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
7785}
7786
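The endian handling in gen_load_exclusive() above boils down to: the word at the lower address is architecturally Rt, so after one 64-bit load the halves are assigned in opposite orders for BE and LE data. A sketch of that split (illustrative only):

#include <stdint.h>
#include <stdio.h>

static void split_ldrexd(uint64_t t64, int big_endian,
                         uint32_t *rt, uint32_t *rt2)
{
    if (big_endian) {
        *rt  = (uint32_t)(t64 >> 32);  /* low address = high half of load */
        *rt2 = (uint32_t)t64;
    } else {
        *rt  = (uint32_t)t64;          /* low address = low half of load */
        *rt2 = (uint32_t)(t64 >> 32);
    }
}

int main(void)
{
    uint32_t rt, rt2;

    split_ldrexd(0x1122334455667788ull, 0, &rt, &rt2);
    printf("LE: rt=%08x rt2=%08x\n", rt, rt2);
    split_ldrexd(0x1122334455667788ull, 1, &rt, &rt2);
    printf("BE: rt=%08x rt2=%08x\n", rt, rt2);
    return 0;
}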
7787static void gen_clrex(DisasContext *s)
7788{
03d05e2d 7789 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7790}
7791
426f5abc 7792static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7793 TCGv_i32 addr, int size)
426f5abc 7794{
354161b3
EC
7795 TCGv_i32 t0, t1, t2;
7796 TCGv_i64 extaddr;
7797 TCGv taddr;
42a268c2
RH
7798 TCGLabel *done_label;
7799 TCGLabel *fail_label;
354161b3 7800 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
7801
7802 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7803 [addr] = {Rt};
7804 {Rd} = 0;
7805 } else {
7806 {Rd} = 1;
7807 } */
7808 fail_label = gen_new_label();
7809 done_label = gen_new_label();
03d05e2d
PM
7810 extaddr = tcg_temp_new_i64();
7811 tcg_gen_extu_i32_i64(extaddr, addr);
7812 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7813 tcg_temp_free_i64(extaddr);
7814
354161b3
EC
7815 taddr = gen_aa32_addr(s, addr, opc);
7816 t0 = tcg_temp_new_i32();
7817 t1 = load_reg(s, rt);
426f5abc 7818 if (size == 3) {
354161b3
EC
7819 TCGv_i64 o64 = tcg_temp_new_i64();
7820 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 7821
354161b3 7822 t2 = load_reg(s, rt2);
3448d47b
PM
7823 /* For AArch32, architecturally the 32-bit word at the lowest
7824 * address is always Rt and the one at addr+4 is Rt2, even if
7825 * the CPU is big-endian. Since we're going to treat this as a
7826 * single 64-bit BE store, we need to put the two halves in the
7827 * opposite order for BE to LE, so that they end up in the right
7828 * places.
7829 * We don't want gen_aa32_frob64() because that does the wrong
7830 * thing for BE32 usermode.
7831 */
7832 if (s->be_data == MO_BE) {
7833 tcg_gen_concat_i32_i64(n64, t2, t1);
7834 } else {
7835 tcg_gen_concat_i32_i64(n64, t1, t2);
7836 }
354161b3 7837 tcg_temp_free_i32(t2);
03d05e2d 7838
354161b3
EC
7839 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
7840 get_mem_index(s), opc);
7841 tcg_temp_free_i64(n64);
7842
354161b3
EC
7843 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
7844 tcg_gen_extrl_i64_i32(t0, o64);
7845
7846 tcg_temp_free_i64(o64);
7847 } else {
7848 t2 = tcg_temp_new_i32();
7849 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
7850 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
7851 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
7852 tcg_temp_free_i32(t2);
426f5abc 7853 }
354161b3
EC
7854 tcg_temp_free_i32(t1);
7855 tcg_temp_free(taddr);
7856 tcg_gen_mov_i32(cpu_R[rd], t0);
7857 tcg_temp_free_i32(t0);
426f5abc 7858 tcg_gen_br(done_label);
354161b3 7859
426f5abc
PB
7860 gen_set_label(fail_label);
7861 tcg_gen_movi_i32(cpu_R[rd], 1);
7862 gen_set_label(done_label);
03d05e2d 7863 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 7864}
426f5abc 7865
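The pseudo-code comment in gen_store_exclusive() maps naturally onto a compare-and-swap. Below is a simplified single-threaded model of the LDREX/STREX pairing using C11 atomics; this is a sketch, not QEMU's actual mechanism, which emits the cmpxchg as TCG and compares the remembered address separately:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uintptr_t excl_addr = (uintptr_t)-1;  /* -1 == monitor clear */
static uint32_t excl_val;

static uint32_t ldrex(_Atomic uint32_t *p)
{
    excl_addr = (uintptr_t)p;
    excl_val = atomic_load(p);
    return excl_val;
}

/* Returns 0 on success, 1 on failure, like the Rd status result. */
static int strex(_Atomic uint32_t *p, uint32_t newval)
{
    uint32_t expected = excl_val;
    int fail = 1;

    if ((uintptr_t)p == excl_addr) {
        fail = !atomic_compare_exchange_strong(p, &expected, newval);
    }
    excl_addr = (uintptr_t)-1;  /* monitor cleared on either outcome */
    return fail;
}

int main(void)
{
    _Atomic uint32_t mem = 41;
    uint32_t v = ldrex(&mem);

    printf("status=%d mem=%u\n", strex(&mem, v + 1), (unsigned)mem);
    return 0;
}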
81465888
PM
7866/* gen_srs:
7867 * @env: CPUARMState
7868 * @s: DisasContext
7869 * @mode: mode field from insn (which stack to store to)
7870 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7871 * @writeback: true if writeback bit set
7872 *
7873 * Generate code for the SRS (Store Return State) insn.
7874 */
7875static void gen_srs(DisasContext *s,
7876 uint32_t mode, uint32_t amode, bool writeback)
7877{
7878 int32_t offset;
cbc0326b
PM
7879 TCGv_i32 addr, tmp;
7880 bool undef = false;
7881
7882 /* SRS is:
7883 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 7884 * and specified mode is monitor mode
cbc0326b
PM
7885 * - UNDEFINED in Hyp mode
7886 * - UNPREDICTABLE in User or System mode
7887 * - UNPREDICTABLE if the specified mode is:
7888 * -- not implemented
7889 * -- not a valid mode number
7890 * -- a mode that's at a higher exception level
7891 * -- Monitor, if we are Non-secure
f01377f5 7892 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 7893 */
ba63cf47 7894 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
7895 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
7896 return;
7897 }
7898
7899 if (s->current_el == 0 || s->current_el == 2) {
7900 undef = true;
7901 }
7902
7903 switch (mode) {
7904 case ARM_CPU_MODE_USR:
7905 case ARM_CPU_MODE_FIQ:
7906 case ARM_CPU_MODE_IRQ:
7907 case ARM_CPU_MODE_SVC:
7908 case ARM_CPU_MODE_ABT:
7909 case ARM_CPU_MODE_UND:
7910 case ARM_CPU_MODE_SYS:
7911 break;
7912 case ARM_CPU_MODE_HYP:
7913 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7914 undef = true;
7915 }
7916 break;
7917 case ARM_CPU_MODE_MON:
7918 /* No need to check specifically for "are we non-secure" because
7919 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7920 * so if this isn't EL3 then we must be non-secure.
7921 */
7922 if (s->current_el != 3) {
7923 undef = true;
7924 }
7925 break;
7926 default:
7927 undef = true;
7928 }
7929
7930 if (undef) {
7931 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
7932 default_exception_el(s));
7933 return;
7934 }
7935
7936 addr = tcg_temp_new_i32();
7937 tmp = tcg_const_i32(mode);
f01377f5
PM
7938 /* get_r13_banked() will raise an exception if called from System mode */
7939 gen_set_condexec(s);
7940 gen_set_pc_im(s, s->pc - 4);
81465888
PM
7941 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7942 tcg_temp_free_i32(tmp);
7943 switch (amode) {
7944 case 0: /* DA */
7945 offset = -4;
7946 break;
7947 case 1: /* IA */
7948 offset = 0;
7949 break;
7950 case 2: /* DB */
7951 offset = -8;
7952 break;
7953 case 3: /* IB */
7954 offset = 4;
7955 break;
7956 default:
7957 abort();
7958 }
7959 tcg_gen_addi_i32(addr, addr, offset);
7960 tmp = load_reg(s, 14);
12dcc321 7961 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 7962 tcg_temp_free_i32(tmp);
81465888
PM
7963 tmp = load_cpu_field(spsr);
7964 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 7965 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 7966 tcg_temp_free_i32(tmp);
81465888
PM
7967 if (writeback) {
7968 switch (amode) {
7969 case 0:
7970 offset = -8;
7971 break;
7972 case 1:
7973 offset = 4;
7974 break;
7975 case 2:
7976 offset = -4;
7977 break;
7978 case 3:
7979 offset = 0;
7980 break;
7981 default:
7982 abort();
7983 }
7984 tcg_gen_addi_i32(addr, addr, offset);
7985 tmp = tcg_const_i32(mode);
7986 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7987 tcg_temp_free_i32(tmp);
7988 }
7989 tcg_temp_free_i32(addr);
dcba3a8d 7990 s->base.is_jmp = DISAS_UPDATE;
81465888
PM
7991}
7992
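The two offset switches in gen_srs() combine so that the pair of stores lands per the DA/IA/DB/IB addressing mode and writeback always moves SP by 8. A sketch tabulating the net effect (derived from the code above; illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* pre-store offset and post-store writeback offset, per amode */
    static const int32_t pre_off[4] = { -4, 0, -8, 4 };  /* DA IA DB IB */
    static const int32_t wb_off[4]  = { -8, 4, -4, 0 };
    static const char *const names[4] = { "DA", "IA", "DB", "IB" };

    for (int amode = 0; amode < 4; amode++) {
        int lr   = pre_off[amode];        /* LR stored at sp + lr */
        int spsr = pre_off[amode] + 4;    /* SPSR at the next word */
        int wb   = spsr + wb_off[amode];  /* new SP if writeback */
        printf("%s: LR at sp%+d, SPSR at sp%+d, writeback sp%+d\n",
               names[amode], lr, spsr, wb);
    }
    return 0;
}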
c2d9644e
RK
7993/* Generate a label used for skipping this instruction */
7994static void arm_gen_condlabel(DisasContext *s)
7995{
7996 if (!s->condjmp) {
7997 s->condlabel = gen_new_label();
7998 s->condjmp = 1;
7999 }
8000}
8001
8002/* Skip this instruction if the ARM condition is false */
8003static void arm_skip_unless(DisasContext *s, uint32_t cond)
8004{
8005 arm_gen_condlabel(s);
8006 arm_gen_test_cc(cond ^ 1, s->condlabel);
8007}
8008
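arm_skip_unless() inverts the condition with cond ^ 1, which works because adjacent ARM condition codes are logical complements (EQ/NE, CS/CC, ..., GT/LE); AL (0xe) never reaches this path. A sketch enumerating the pairs:

#include <stdio.h>

static const char *cond_name[16] = {
    "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC",
    "HI", "LS", "GE", "LT", "GT", "LE", "AL", "NV"
};

int main(void)
{
    for (int cond = 0; cond < 14; cond++) {
        printf("%s inverts to %s\n", cond_name[cond], cond_name[cond ^ 1]);
    }
    return 0;
}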
f4df2210 8009static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8010{
f4df2210 8011 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8012 TCGv_i32 tmp;
8013 TCGv_i32 tmp2;
8014 TCGv_i32 tmp3;
8015 TCGv_i32 addr;
a7812ae4 8016 TCGv_i64 tmp64;
9ee6e8bb 8017
e13886e3
PM
8018 /* M variants do not implement ARM mode; this must raise the INVSTATE
8019 * UsageFault exception.
8020 */
b53d8923 8021 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8022 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8023 default_exception_el(s));
8024 return;
b53d8923 8025 }
9ee6e8bb
PB
8026 cond = insn >> 28;
8027 if (cond == 0xf){
be5e7a76
DES
8028 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8029 * choose to UNDEF. In ARMv5 and above the space is used
8030 * for miscellaneous unconditional instructions.
8031 */
8032 ARCH(5);
8033
9ee6e8bb
PB
8034 /* Unconditional instructions. */
8035 if (((insn >> 25) & 7) == 1) {
8036 /* NEON Data processing. */
d614a513 8037 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8038 goto illegal_op;
d614a513 8039 }
9ee6e8bb 8040
7dcc1f89 8041 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8042 goto illegal_op;
7dcc1f89 8043 }
9ee6e8bb
PB
8044 return;
8045 }
8046 if ((insn & 0x0f100000) == 0x04000000) {
8047 /* NEON load/store. */
d614a513 8048 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8049 goto illegal_op;
d614a513 8050 }
9ee6e8bb 8051
7dcc1f89 8052 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8053 goto illegal_op;
7dcc1f89 8054 }
9ee6e8bb
PB
8055 return;
8056 }
6a57f3eb
WN
8057 if ((insn & 0x0f000e10) == 0x0e000a00) {
8058 /* VFP. */
7dcc1f89 8059 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8060 goto illegal_op;
8061 }
8062 return;
8063 }
3d185e5d
PM
8064 if (((insn & 0x0f30f000) == 0x0510f000) ||
8065 ((insn & 0x0f30f010) == 0x0710f000)) {
8066 if ((insn & (1 << 22)) == 0) {
8067 /* PLDW; v7MP */
d614a513 8068 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8069 goto illegal_op;
8070 }
8071 }
8072 /* Otherwise PLD; v5TE+ */
be5e7a76 8073 ARCH(5TE);
3d185e5d
PM
8074 return;
8075 }
8076 if (((insn & 0x0f70f000) == 0x0450f000) ||
8077 ((insn & 0x0f70f010) == 0x0650f000)) {
8078 ARCH(7);
8079 return; /* PLI; V7 */
8080 }
8081 if (((insn & 0x0f700000) == 0x04100000) ||
8082 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8083 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8084 goto illegal_op;
8085 }
8086 return; /* v7MP: Unallocated memory hint: must NOP */
8087 }
8088
8089 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8090 ARCH(6);
8091 /* setend */
9886ecdf
PB
8092 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8093 gen_helper_setend(cpu_env);
dcba3a8d 8094 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8095 }
8096 return;
8097 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8098 switch ((insn >> 4) & 0xf) {
8099 case 1: /* clrex */
8100 ARCH(6K);
426f5abc 8101 gen_clrex(s);
9ee6e8bb
PB
8102 return;
8103 case 4: /* dsb */
8104 case 5: /* dmb */
9ee6e8bb 8105 ARCH(7);
61e4c432 8106 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8107 return;
6df99dec
SS
8108 case 6: /* isb */
8109 /* We need to break the TB after this insn to execute
8110 * self-modifying code correctly and also to take
8111 * any pending interrupts immediately.
8112 */
0b609cc1 8113 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 8114 return;
9888bd1e
RH
8115 case 7: /* sb */
8116 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
8117 goto illegal_op;
8118 }
8119 /*
8120 * TODO: There is no speculation barrier opcode
8121 * for TCG; MB and end the TB instead.
8122 */
8123 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
8124 gen_goto_tb(s, 0, s->pc & ~1);
8125 return;
9ee6e8bb
PB
8126 default:
8127 goto illegal_op;
8128 }
8129 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8130 /* srs */
81465888
PM
8131 ARCH(6);
8132 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8133 return;
ea825eee 8134 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8135 /* rfe */
c67b6b71 8136 int32_t offset;
9ee6e8bb
PB
8137 if (IS_USER(s))
8138 goto illegal_op;
8139 ARCH(6);
8140 rn = (insn >> 16) & 0xf;
b0109805 8141 addr = load_reg(s, rn);
9ee6e8bb
PB
8142 i = (insn >> 23) & 3;
8143 switch (i) {
b0109805 8144 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8145 case 1: offset = 0; break; /* IA */
8146 case 2: offset = -8; break; /* DB */
b0109805 8147 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8148 default: abort();
8149 }
8150 if (offset)
b0109805
PB
8151 tcg_gen_addi_i32(addr, addr, offset);
8152 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8153 tmp = tcg_temp_new_i32();
12dcc321 8154 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8155 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8156 tmp2 = tcg_temp_new_i32();
12dcc321 8157 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8158 if (insn & (1 << 21)) {
8159 /* Base writeback. */
8160 switch (i) {
b0109805 8161 case 0: offset = -8; break;
c67b6b71
FN
8162 case 1: offset = 4; break;
8163 case 2: offset = -4; break;
b0109805 8164 case 3: offset = 0; break;
9ee6e8bb
PB
8165 default: abort();
8166 }
8167 if (offset)
b0109805
PB
8168 tcg_gen_addi_i32(addr, addr, offset);
8169 store_reg(s, rn, addr);
8170 } else {
7d1b0095 8171 tcg_temp_free_i32(addr);
9ee6e8bb 8172 }
b0109805 8173 gen_rfe(s, tmp, tmp2);
c67b6b71 8174 return;
9ee6e8bb
PB
8175 } else if ((insn & 0x0e000000) == 0x0a000000) {
8176 /* branch link and change to thumb (blx <offset>) */
8177 int32_t offset;
8178
8179 val = (uint32_t)s->pc;
7d1b0095 8180 tmp = tcg_temp_new_i32();
d9ba4830
PB
8181 tcg_gen_movi_i32(tmp, val);
8182 store_reg(s, 14, tmp);
9ee6e8bb
PB
8183 /* Sign-extend the 24-bit offset */
8184 offset = (((int32_t)insn) << 8) >> 8;
8185 /* offset * 4 + bit24 * 2 + (thumb bit) */
8186 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8187 /* pipeline offset */
8188 val += 4;
be5e7a76 8189 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8190 gen_bx_im(s, val);
9ee6e8bb
PB
8191 return;
8192 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8193 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8194 /* iWMMXt register transfer. */
c0f4af17 8195 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8196 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8197 return;
c0f4af17
PM
8198 }
8199 }
9ee6e8bb 8200 }
8b7209fa
RH
8201 } else if ((insn & 0x0e000a00) == 0x0c000800
8202 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8203 if (disas_neon_insn_3same_ext(s, insn)) {
8204 goto illegal_op;
8205 }
8206 return;
638808ff
RH
8207 } else if ((insn & 0x0f000a00) == 0x0e000800
8208 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8209 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
8210 goto illegal_op;
8211 }
8212 return;
9ee6e8bb
PB
8213 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8214 /* Coprocessor double register transfer. */
be5e7a76 8215 ARCH(5TE);
9ee6e8bb
PB
8216 } else if ((insn & 0x0f000010) == 0x0e000010) {
8217 /* Additional coprocessor register transfer. */
7997d92f 8218 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8219 uint32_t mask;
8220 uint32_t val;
8221 /* cps (privileged) */
8222 if (IS_USER(s))
8223 return;
8224 mask = val = 0;
8225 if (insn & (1 << 19)) {
8226 if (insn & (1 << 8))
8227 mask |= CPSR_A;
8228 if (insn & (1 << 7))
8229 mask |= CPSR_I;
8230 if (insn & (1 << 6))
8231 mask |= CPSR_F;
8232 if (insn & (1 << 18))
8233 val |= mask;
8234 }
7997d92f 8235 if (insn & (1 << 17)) {
9ee6e8bb
PB
8236 mask |= CPSR_M;
8237 val |= (insn & 0x1f);
8238 }
8239 if (mask) {
2fbac54b 8240 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8241 }
8242 return;
8243 }
8244 goto illegal_op;
8245 }
8246 if (cond != 0xe) {
8247 /* if the condition is not "always", generate a conditional
8248 jump to the next instruction */
c2d9644e 8249 arm_skip_unless(s, cond);
9ee6e8bb
PB
8250 }
8251 if ((insn & 0x0f900000) == 0x03000000) {
8252 if ((insn & (1 << 21)) == 0) {
8253 ARCH(6T2);
8254 rd = (insn >> 12) & 0xf;
8255 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8256 if ((insn & (1 << 22)) == 0) {
8257 /* MOVW */
7d1b0095 8258 tmp = tcg_temp_new_i32();
5e3f878a 8259 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8260 } else {
8261 /* MOVT */
5e3f878a 8262 tmp = load_reg(s, rd);
86831435 8263 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8264 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8265 }
5e3f878a 8266 store_reg(s, rd, tmp);
9ee6e8bb
PB
8267 } else {
8268 if (((insn >> 12) & 0xf) != 0xf)
8269 goto illegal_op;
8270 if (((insn >> 16) & 0xf) == 0) {
8271 gen_nop_hint(s, insn & 0xff);
8272 } else {
8273 /* CPSR = immediate */
8274 val = insn & 0xff;
8275 shift = ((insn >> 8) & 0xf) * 2;
8276 if (shift)
8277 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8278 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8279 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8280 i, val)) {
9ee6e8bb 8281 goto illegal_op;
7dcc1f89 8282 }
9ee6e8bb
PB
8283 }
8284 }
8285 } else if ((insn & 0x0f900000) == 0x01000000
8286 && (insn & 0x00000090) != 0x00000090) {
8287 /* miscellaneous instructions */
8288 op1 = (insn >> 21) & 3;
8289 sh = (insn >> 4) & 0xf;
8290 rm = insn & 0xf;
8291 switch (sh) {
8bfd0550
PM
8292 case 0x0: /* MSR, MRS */
8293 if (insn & (1 << 9)) {
8294 /* MSR (banked) and MRS (banked) */
8295 int sysm = extract32(insn, 16, 4) |
8296 (extract32(insn, 8, 1) << 4);
8297 int r = extract32(insn, 22, 1);
8298
8299 if (op1 & 1) {
8300 /* MSR (banked) */
8301 gen_msr_banked(s, r, sysm, rm);
8302 } else {
8303 /* MRS (banked) */
8304 int rd = extract32(insn, 12, 4);
8305
8306 gen_mrs_banked(s, r, sysm, rd);
8307 }
8308 break;
8309 }
8310
8311 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8312 if (op1 & 1) {
8313 /* PSR = reg */
2fbac54b 8314 tmp = load_reg(s, rm);
9ee6e8bb 8315 i = ((op1 & 2) != 0);
7dcc1f89 8316 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8317 goto illegal_op;
8318 } else {
8319 /* reg = PSR */
8320 rd = (insn >> 12) & 0xf;
8321 if (op1 & 2) {
8322 if (IS_USER(s))
8323 goto illegal_op;
d9ba4830 8324 tmp = load_cpu_field(spsr);
9ee6e8bb 8325 } else {
7d1b0095 8326 tmp = tcg_temp_new_i32();
9ef39277 8327 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8328 }
d9ba4830 8329 store_reg(s, rd, tmp);
9ee6e8bb
PB
8330 }
8331 break;
8332 case 0x1:
8333 if (op1 == 1) {
8334 /* branch/exchange thumb (bx). */
be5e7a76 8335 ARCH(4T);
d9ba4830
PB
8336 tmp = load_reg(s, rm);
8337 gen_bx(s, tmp);
9ee6e8bb
PB
8338 } else if (op1 == 3) {
8339 /* clz */
be5e7a76 8340 ARCH(5);
9ee6e8bb 8341 rd = (insn >> 12) & 0xf;
1497c961 8342 tmp = load_reg(s, rm);
7539a012 8343 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8344 store_reg(s, rd, tmp);
9ee6e8bb
PB
8345 } else {
8346 goto illegal_op;
8347 }
8348 break;
8349 case 0x2:
8350 if (op1 == 1) {
8351 ARCH(5J); /* bxj */
8352 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8353 tmp = load_reg(s, rm);
8354 gen_bx(s, tmp);
9ee6e8bb
PB
8355 } else {
8356 goto illegal_op;
8357 }
8358 break;
8359 case 0x3:
8360 if (op1 != 1)
8361 goto illegal_op;
8362
be5e7a76 8363 ARCH(5);
9ee6e8bb 8364 /* branch link/exchange thumb (blx) */
d9ba4830 8365 tmp = load_reg(s, rm);
7d1b0095 8366 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8367 tcg_gen_movi_i32(tmp2, s->pc);
8368 store_reg(s, 14, tmp2);
8369 gen_bx(s, tmp);
9ee6e8bb 8370 break;
eb0ecd5a
WN
8371 case 0x4:
8372 {
8373 /* crc32/crc32c */
8374 uint32_t c = extract32(insn, 8, 4);
8375
8376 /* Check this CPU supports ARMv8 CRC instructions.
8377 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8378 * Bits 8, 10 and 11 should be zero.
8379 */
962fcbf2 8380 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
eb0ecd5a
WN
8381 goto illegal_op;
8382 }
8383
8384 rn = extract32(insn, 16, 4);
8385 rd = extract32(insn, 12, 4);
8386
8387 tmp = load_reg(s, rn);
8388 tmp2 = load_reg(s, rm);
aa633469
PM
8389 if (op1 == 0) {
8390 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8391 } else if (op1 == 1) {
8392 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8393 }
eb0ecd5a
WN
8394 tmp3 = tcg_const_i32(1 << op1);
8395 if (c & 0x2) {
8396 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8397 } else {
8398 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8399 }
8400 tcg_temp_free_i32(tmp2);
8401 tcg_temp_free_i32(tmp3);
8402 store_reg(s, rd, tmp);
8403 break;
8404 }
9ee6e8bb 8405 case 0x5: /* saturating add/subtract */
be5e7a76 8406 ARCH(5TE);
9ee6e8bb
PB
8407 rd = (insn >> 12) & 0xf;
8408 rn = (insn >> 16) & 0xf;
b40d0353 8409 tmp = load_reg(s, rm);
5e3f878a 8410 tmp2 = load_reg(s, rn);
9ee6e8bb 8411 if (op1 & 2)
9ef39277 8412 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8413 if (op1 & 1)
9ef39277 8414 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8415 else
9ef39277 8416 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8417 tcg_temp_free_i32(tmp2);
5e3f878a 8418 store_reg(s, rd, tmp);
9ee6e8bb 8419 break;
55c544ed
PM
8420 case 0x6: /* ERET */
8421 if (op1 != 3) {
8422 goto illegal_op;
8423 }
8424 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
8425 goto illegal_op;
8426 }
8427 if ((insn & 0x000fff0f) != 0x0000000e) {
8428 /* UNPREDICTABLE; we choose to UNDEF */
8429 goto illegal_op;
8430 }
8431
8432 if (s->current_el == 2) {
8433 tmp = load_cpu_field(elr_el[2]);
8434 } else {
8435 tmp = load_reg(s, 14);
8436 }
8437 gen_exception_return(s, tmp);
8438 break;
49e14940 8439 case 7:
d4a2dc67
PM
8440 {
8441 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8442 switch (op1) {
19a6e31c
PM
8443 case 0:
8444 /* HLT */
8445 gen_hlt(s, imm16);
8446 break;
37e6456e
PM
8447 case 1:
8448 /* bkpt */
8449 ARCH(5);
c900a2e6 8450 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
37e6456e
PM
8451 break;
8452 case 2:
8453 /* Hypervisor call (v7) */
8454 ARCH(7);
8455 if (IS_USER(s)) {
8456 goto illegal_op;
8457 }
8458 gen_hvc(s, imm16);
8459 break;
8460 case 3:
8461 /* Secure monitor call (v6+) */
8462 ARCH(6K);
8463 if (IS_USER(s)) {
8464 goto illegal_op;
8465 }
8466 gen_smc(s);
8467 break;
8468 default:
19a6e31c 8469 g_assert_not_reached();
49e14940 8470 }
9ee6e8bb 8471 break;
d4a2dc67 8472 }
9ee6e8bb
PB
8473 case 0x8: /* signed multiply */
8474 case 0xa:
8475 case 0xc:
8476 case 0xe:
be5e7a76 8477 ARCH(5TE);
9ee6e8bb
PB
8478 rs = (insn >> 8) & 0xf;
8479 rn = (insn >> 12) & 0xf;
8480 rd = (insn >> 16) & 0xf;
8481 if (op1 == 1) {
8482 /* (32 * 16) >> 16 */
5e3f878a
PB
8483 tmp = load_reg(s, rm);
8484 tmp2 = load_reg(s, rs);
9ee6e8bb 8485 if (sh & 4)
5e3f878a 8486 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8487 else
5e3f878a 8488 gen_sxth(tmp2);
a7812ae4
PB
8489 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8490 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8491 tmp = tcg_temp_new_i32();
ecc7b3aa 8492 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8493 tcg_temp_free_i64(tmp64);
9ee6e8bb 8494 if ((sh & 2) == 0) {
5e3f878a 8495 tmp2 = load_reg(s, rn);
9ef39277 8496 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8497 tcg_temp_free_i32(tmp2);
9ee6e8bb 8498 }
5e3f878a 8499 store_reg(s, rd, tmp);
9ee6e8bb
PB
8500 } else {
8501 /* 16 * 16 */
5e3f878a
PB
8502 tmp = load_reg(s, rm);
8503 tmp2 = load_reg(s, rs);
8504 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8505 tcg_temp_free_i32(tmp2);
9ee6e8bb 8506 if (op1 == 2) {
a7812ae4
PB
8507 tmp64 = tcg_temp_new_i64();
8508 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8509 tcg_temp_free_i32(tmp);
a7812ae4
PB
8510 gen_addq(s, tmp64, rn, rd);
8511 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8512 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8513 } else {
8514 if (op1 == 0) {
5e3f878a 8515 tmp2 = load_reg(s, rn);
9ef39277 8516 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8517 tcg_temp_free_i32(tmp2);
9ee6e8bb 8518 }
5e3f878a 8519 store_reg(s, rd, tmp);
9ee6e8bb
PB
8520 }
8521 }
8522 break;
8523 default:
8524 goto illegal_op;
8525 }
8526 } else if (((insn & 0x0e000000) == 0 &&
8527 (insn & 0x00000090) != 0x90) ||
8528 ((insn & 0x0e000000) == (1 << 25))) {
8529 int set_cc, logic_cc, shiftop;
8530
8531 op1 = (insn >> 21) & 0xf;
8532 set_cc = (insn >> 20) & 1;
8533 logic_cc = table_logic_cc[op1] & set_cc;
8534
8535 /* data processing instruction */
8536 if (insn & (1 << 25)) {
8537 /* immediate operand */
8538 val = insn & 0xff;
8539 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8540 if (shift) {
9ee6e8bb 8541 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8542 }
7d1b0095 8543 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8544 tcg_gen_movi_i32(tmp2, val);
8545 if (logic_cc && shift) {
8546 gen_set_CF_bit31(tmp2);
8547 }
9ee6e8bb
PB
8548 } else {
8549 /* register */
8550 rm = (insn) & 0xf;
e9bb4aa9 8551 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8552 shiftop = (insn >> 5) & 3;
8553 if (!(insn & (1 << 4))) {
8554 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8555 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8556 } else {
8557 rs = (insn >> 8) & 0xf;
8984bd2e 8558 tmp = load_reg(s, rs);
e9bb4aa9 8559 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8560 }
8561 }
8562 if (op1 != 0x0f && op1 != 0x0d) {
8563 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8564 tmp = load_reg(s, rn);
8565 } else {
f764718d 8566 tmp = NULL;
9ee6e8bb
PB
8567 }
8568 rd = (insn >> 12) & 0xf;
8569 switch(op1) {
8570 case 0x00:
e9bb4aa9
JR
8571 tcg_gen_and_i32(tmp, tmp, tmp2);
8572 if (logic_cc) {
8573 gen_logic_CC(tmp);
8574 }
7dcc1f89 8575 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8576 break;
8577 case 0x01:
e9bb4aa9
JR
8578 tcg_gen_xor_i32(tmp, tmp, tmp2);
8579 if (logic_cc) {
8580 gen_logic_CC(tmp);
8581 }
7dcc1f89 8582 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8583 break;
8584 case 0x02:
8585 if (set_cc && rd == 15) {
8586 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8587 if (IS_USER(s)) {
9ee6e8bb 8588 goto illegal_op;
e9bb4aa9 8589 }
72485ec4 8590 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8591 gen_exception_return(s, tmp);
9ee6e8bb 8592 } else {
e9bb4aa9 8593 if (set_cc) {
72485ec4 8594 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8595 } else {
8596 tcg_gen_sub_i32(tmp, tmp, tmp2);
8597 }
7dcc1f89 8598 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8599 }
8600 break;
8601 case 0x03:
e9bb4aa9 8602 if (set_cc) {
72485ec4 8603 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8604 } else {
8605 tcg_gen_sub_i32(tmp, tmp2, tmp);
8606 }
7dcc1f89 8607 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8608 break;
8609 case 0x04:
e9bb4aa9 8610 if (set_cc) {
72485ec4 8611 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8612 } else {
8613 tcg_gen_add_i32(tmp, tmp, tmp2);
8614 }
7dcc1f89 8615 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8616 break;
8617 case 0x05:
e9bb4aa9 8618 if (set_cc) {
49b4c31e 8619 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8620 } else {
8621 gen_add_carry(tmp, tmp, tmp2);
8622 }
7dcc1f89 8623 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8624 break;
8625 case 0x06:
e9bb4aa9 8626 if (set_cc) {
2de68a49 8627 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8628 } else {
8629 gen_sub_carry(tmp, tmp, tmp2);
8630 }
7dcc1f89 8631 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8632 break;
8633 case 0x07:
e9bb4aa9 8634 if (set_cc) {
2de68a49 8635 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8636 } else {
8637 gen_sub_carry(tmp, tmp2, tmp);
8638 }
7dcc1f89 8639 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8640 break;
8641 case 0x08:
8642 if (set_cc) {
e9bb4aa9
JR
8643 tcg_gen_and_i32(tmp, tmp, tmp2);
8644 gen_logic_CC(tmp);
9ee6e8bb 8645 }
7d1b0095 8646 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8647 break;
8648 case 0x09:
8649 if (set_cc) {
e9bb4aa9
JR
8650 tcg_gen_xor_i32(tmp, tmp, tmp2);
8651 gen_logic_CC(tmp);
9ee6e8bb 8652 }
7d1b0095 8653 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8654 break;
8655 case 0x0a:
8656 if (set_cc) {
72485ec4 8657 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8658 }
7d1b0095 8659 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8660 break;
8661 case 0x0b:
8662 if (set_cc) {
72485ec4 8663 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8664 }
7d1b0095 8665 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8666 break;
8667 case 0x0c:
e9bb4aa9
JR
8668 tcg_gen_or_i32(tmp, tmp, tmp2);
8669 if (logic_cc) {
8670 gen_logic_CC(tmp);
8671 }
7dcc1f89 8672 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8673 break;
8674 case 0x0d:
8675 if (logic_cc && rd == 15) {
8676 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8677 if (IS_USER(s)) {
9ee6e8bb 8678 goto illegal_op;
e9bb4aa9
JR
8679 }
8680 gen_exception_return(s, tmp2);
9ee6e8bb 8681 } else {
e9bb4aa9
JR
8682 if (logic_cc) {
8683 gen_logic_CC(tmp2);
8684 }
7dcc1f89 8685 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8686 }
8687 break;
8688 case 0x0e:
f669df27 8689 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8690 if (logic_cc) {
8691 gen_logic_CC(tmp);
8692 }
7dcc1f89 8693 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8694 break;
8695 default:
8696 case 0x0f:
e9bb4aa9
JR
8697 tcg_gen_not_i32(tmp2, tmp2);
8698 if (logic_cc) {
8699 gen_logic_CC(tmp2);
8700 }
7dcc1f89 8701 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8702 break;
8703 }
e9bb4aa9 8704 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8705 tcg_temp_free_i32(tmp2);
e9bb4aa9 8706 }
9ee6e8bb
PB
8707 } else {
8708 /* other instructions */
8709 op1 = (insn >> 24) & 0xf;
8710 switch(op1) {
8711 case 0x0:
8712 case 0x1:
8713 /* multiplies, extra load/stores */
8714 sh = (insn >> 5) & 3;
8715 if (sh == 0) {
8716 if (op1 == 0x0) {
8717 rd = (insn >> 16) & 0xf;
8718 rn = (insn >> 12) & 0xf;
8719 rs = (insn >> 8) & 0xf;
8720 rm = (insn) & 0xf;
8721 op1 = (insn >> 20) & 0xf;
8722 switch (op1) {
8723 case 0: case 1: case 2: case 3: case 6:
8724 /* 32 bit mul */
5e3f878a
PB
8725 tmp = load_reg(s, rs);
8726 tmp2 = load_reg(s, rm);
8727 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8728 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8729 if (insn & (1 << 22)) {
8730 /* Subtract (mls) */
8731 ARCH(6T2);
5e3f878a
PB
8732 tmp2 = load_reg(s, rn);
8733 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8734 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8735 } else if (insn & (1 << 21)) {
8736 /* Add */
5e3f878a
PB
8737 tmp2 = load_reg(s, rn);
8738 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8739 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8740 }
8741 if (insn & (1 << 20))
5e3f878a
PB
8742 gen_logic_CC(tmp);
8743 store_reg(s, rd, tmp);
9ee6e8bb 8744 break;
8aac08b1
AJ
8745 case 4:
8746 /* 64 bit mul double accumulate (UMAAL) */
8747 ARCH(6);
8748 tmp = load_reg(s, rs);
8749 tmp2 = load_reg(s, rm);
8750 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8751 gen_addq_lo(s, tmp64, rn);
8752 gen_addq_lo(s, tmp64, rd);
8753 gen_storeq_reg(s, rn, rd, tmp64);
8754 tcg_temp_free_i64(tmp64);
8755 break;
8756 case 8: case 9: case 10: case 11:
8757 case 12: case 13: case 14: case 15:
8758 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8759 tmp = load_reg(s, rs);
8760 tmp2 = load_reg(s, rm);
8aac08b1 8761 if (insn & (1 << 22)) {
c9f10124 8762 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8763 } else {
c9f10124 8764 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8765 }
8766 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8767 TCGv_i32 al = load_reg(s, rn);
8768 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8769 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8770 tcg_temp_free_i32(al);
8771 tcg_temp_free_i32(ah);
9ee6e8bb 8772 }
8aac08b1 8773 if (insn & (1 << 20)) {
c9f10124 8774 gen_logicq_cc(tmp, tmp2);
8aac08b1 8775 }
c9f10124
RH
8776 store_reg(s, rn, tmp);
8777 store_reg(s, rd, tmp2);
9ee6e8bb 8778 break;
8aac08b1
AJ
8779 default:
8780 goto illegal_op;
9ee6e8bb
PB
8781 }
8782 } else {
8783 rn = (insn >> 16) & 0xf;
8784 rd = (insn >> 12) & 0xf;
8785 if (insn & (1 << 23)) {
8786 /* load/store exclusive */
96c55295
PM
8787 bool is_ld = extract32(insn, 20, 1);
8788 bool is_lasr = !extract32(insn, 8, 1);
2359bf80 8789 int op2 = (insn >> 8) & 3;
86753403 8790 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8791
8792 switch (op2) {
8793 case 0: /* lda/stl */
8794 if (op1 == 1) {
8795 goto illegal_op;
8796 }
8797 ARCH(8);
8798 break;
8799 case 1: /* reserved */
8800 goto illegal_op;
8801 case 2: /* ldaex/stlex */
8802 ARCH(8);
8803 break;
8804 case 3: /* ldrex/strex */
8805 if (op1) {
8806 ARCH(6K);
8807 } else {
8808 ARCH(6);
8809 }
8810 break;
8811 }
8812
3174f8e9 8813 addr = tcg_temp_local_new_i32();
98a46317 8814 load_reg_var(s, addr, rn);
2359bf80 8815
96c55295
PM
8816 if (is_lasr && !is_ld) {
8817 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8818 }
8819
2359bf80 8820 if (op2 == 0) {
96c55295 8821 if (is_ld) {
2359bf80
MR
8822 tmp = tcg_temp_new_i32();
8823 switch (op1) {
8824 case 0: /* lda */
9bb6558a
PM
8825 gen_aa32_ld32u_iss(s, tmp, addr,
8826 get_mem_index(s),
8827 rd | ISSIsAcqRel);
2359bf80
MR
8828 break;
8829 case 2: /* ldab */
9bb6558a
PM
8830 gen_aa32_ld8u_iss(s, tmp, addr,
8831 get_mem_index(s),
8832 rd | ISSIsAcqRel);
2359bf80
MR
8833 break;
8834 case 3: /* ldah */
9bb6558a
PM
8835 gen_aa32_ld16u_iss(s, tmp, addr,
8836 get_mem_index(s),
8837 rd | ISSIsAcqRel);
2359bf80
MR
8838 break;
8839 default:
8840 abort();
8841 }
8842 store_reg(s, rd, tmp);
8843 } else {
8844 rm = insn & 0xf;
8845 tmp = load_reg(s, rm);
8846 switch (op1) {
8847 case 0: /* stl */
9bb6558a
PM
8848 gen_aa32_st32_iss(s, tmp, addr,
8849 get_mem_index(s),
8850 rm | ISSIsAcqRel);
2359bf80
MR
8851 break;
8852 case 2: /* stlb */
9bb6558a
PM
8853 gen_aa32_st8_iss(s, tmp, addr,
8854 get_mem_index(s),
8855 rm | ISSIsAcqRel);
2359bf80
MR
8856 break;
8857 case 3: /* stlh */
9bb6558a
PM
8858 gen_aa32_st16_iss(s, tmp, addr,
8859 get_mem_index(s),
8860 rm | ISSIsAcqRel);
2359bf80
MR
8861 break;
8862 default:
8863 abort();
8864 }
8865 tcg_temp_free_i32(tmp);
8866 }
96c55295 8867 } else if (is_ld) {
86753403
PB
8868 switch (op1) {
8869 case 0: /* ldrex */
426f5abc 8870 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8871 break;
8872 case 1: /* ldrexd */
426f5abc 8873 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8874 break;
8875 case 2: /* ldrexb */
426f5abc 8876 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8877 break;
8878 case 3: /* ldrexh */
426f5abc 8879 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8880 break;
8881 default:
8882 abort();
8883 }
9ee6e8bb
PB
8884 } else {
8885 rm = insn & 0xf;
86753403
PB
8886 switch (op1) {
8887 case 0: /* strex */
426f5abc 8888 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8889 break;
8890 case 1: /* strexd */
502e64fe 8891 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8892 break;
8893 case 2: /* strexb */
426f5abc 8894 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8895 break;
8896 case 3: /* strexh */
426f5abc 8897 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8898 break;
8899 default:
8900 abort();
8901 }
9ee6e8bb 8902 }
39d5492a 8903 tcg_temp_free_i32(addr);
96c55295
PM
8904
8905 if (is_lasr && is_ld) {
8906 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
8907 }
c4869ca6
OS
8908 } else if ((insn & 0x00300f00) == 0) {
8909 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
8910 * - SWP, SWPB
8911 */
8912
cf12bce0
EC
8913 TCGv taddr;
8914 TCGMemOp opc = s->be_data;
8915
9ee6e8bb
PB
8916 rm = (insn) & 0xf;
8917
9ee6e8bb 8918 if (insn & (1 << 22)) {
cf12bce0 8919 opc |= MO_UB;
9ee6e8bb 8920 } else {
cf12bce0 8921 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 8922 }
cf12bce0
EC
8923
8924 addr = load_reg(s, rn);
8925 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 8926 tcg_temp_free_i32(addr);
cf12bce0
EC
8927
8928 tmp = load_reg(s, rm);
8929 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8930 get_mem_index(s), opc);
8931 tcg_temp_free(taddr);
8932 store_reg(s, rd, tmp);
c4869ca6
OS
                    } else {
                        goto illegal_op;
                    }
                }
            } else {
                int address_offset;
                bool load = insn & (1 << 20);
                bool wbit = insn & (1 << 21);
                bool pbit = insn & (1 << 24);
                bool doubleword = false;
                ISSInfo issinfo;

                /* Misc load/store */
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;

                /* ISS not valid if writeback */
                issinfo = (pbit & !wbit) ? rd : ISSInvalid;

                if (!load && (sh & 2)) {
                    /* doubleword */
                    ARCH(5TE);
                    if (rd & 1) {
                        /* UNPREDICTABLE; we choose to UNDEF */
                        goto illegal_op;
                    }
                    load = (sh & 1) == 0;
                    doubleword = true;
                }
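                /*
                 * For the doubleword forms, sh bit 1 selects "doubleword"
                 * and sh bit 0 then distinguishes ldrd (sh == 2 turns load
                 * back on) from strd (sh == 3).  rd must be even: an odd rd
                 * is UNPREDICTABLE and we chose to UNDEF above.
                 */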

                addr = load_reg(s, rn);
                if (pbit) {
                    gen_add_datah_offset(s, insn, 0, addr);
                }
                address_offset = 0;

                if (doubleword) {
                    if (!load) {
                        /* store */
                        tmp = load_reg(s, rd);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = load_reg(s, rd + 1);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* load */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, rd, tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        rd++;
                    }
                    address_offset = -4;
                } else if (load) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    switch (sh) {
                    case 1:
                        gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
                                           issinfo);
                        break;
                    case 2:
                        gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
                                          issinfo);
                        break;
                    default:
                    case 3:
                        gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
                                           issinfo);
                        break;
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, rd);
                    gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
                    tcg_temp_free_i32(tmp);
                }
                /* Perform base writeback before the loaded value to
                   ensure correct behavior with overlapping index registers.
                   ldrd with base writeback is undefined if the
                   destination and index registers overlap. */
                if (!pbit) {
                    gen_add_datah_offset(s, insn, address_offset, addr);
                    store_reg(s, rn, addr);
                } else if (wbit) {
                    if (address_offset)
                        tcg_gen_addi_i32(addr, addr, address_offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                if (load) {
                    /* Complete the load.  */
                    store_reg(s, rd, tmp);
                }
            }
            break;
        case 0x4:
        case 0x5:
            goto do_ldst;
        case 0x6:
        case 0x7:
            if (insn & (1 << 4)) {
                ARCH(6);
                /* Armv6 Media instructions.  */
                rm = insn & 0xf;
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                rs = (insn >> 8) & 0xf;
                switch ((insn >> 23) & 3) {
                case 0: /* Parallel add/subtract.  */
                    op1 = (insn >> 20) & 7;
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    sh = (insn >> 5) & 7;
                    if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                        goto illegal_op;
                    gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                    break;
                case 1:
                    if ((insn & 0x00700020) == 0) {
                        /* Halfword pack.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            /* pkhtb */
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp2, tmp2, shift);
                            tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                            tcg_gen_ext16u_i32(tmp2, tmp2);
                        } else {
                            /* pkhbt */
                            if (shift)
                                tcg_gen_shli_i32(tmp2, tmp2, shift);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        }
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00200020) == 0x00200000) {
                        /* [us]sat */
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp, tmp, shift);
                        } else {
                            tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                        else
                            gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00300fe0) == 0x00200f20) {
                        /* [us]sat16 */
                        tmp = load_reg(s, rm);
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                        else
                            gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                        /* Select bytes.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        tmp3 = tcg_temp_new_i32();
                        tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                        gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                        tcg_temp_free_i32(tmp3);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x000003e0) == 0x00000060) {
                        tmp = load_reg(s, rm);
                        shift = (insn >> 10) & 3;
                        /* ??? In many cases it's not necessary to do a
                           rotate, a shift is sufficient.  */
                        if (shift != 0)
                            tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                        op1 = (insn >> 20) & 7;
                        switch (op1) {
                        case 0: gen_sxtb16(tmp);  break;
                        case 2: gen_sxtb(tmp);    break;
                        case 3: gen_sxth(tmp);    break;
                        case 4: gen_uxtb16(tmp);  break;
                        case 6: gen_uxtb(tmp);    break;
                        case 7: gen_uxth(tmp);    break;
                        default: goto illegal_op;
                        }
                        if (rn != 15) {
                            tmp2 = load_reg(s, rn);
                            if ((op1 & 3) == 0) {
                                gen_add16(tmp, tmp2);
                            } else {
                                tcg_gen_add_i32(tmp, tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            }
                        }
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                        /* rev */
                        tmp = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            if (insn & (1 << 7)) {
                                gen_revsh(tmp);
                            } else {
                                ARCH(6T2);
                                gen_helper_rbit(tmp, tmp);
                            }
                        } else {
                            if (insn & (1 << 7))
                                gen_rev16(tmp);
                            else
                                tcg_gen_bswap32_i32(tmp, tmp);
                        }
                        store_reg(s, rd, tmp);
                    } else {
                        goto illegal_op;
                    }
                    break;
                case 2: /* Multiplies (Type 3).  */
                    switch ((insn >> 20) & 0x7) {
                    case 5:
                        if (((insn >> 6) ^ (insn >> 7)) & 1) {
                            /* op2 not 00x or 11x : UNDEF */
                            goto illegal_op;
                        }
                        /* Signed multiply most significant [accumulate].
                           (SMMUL, SMMLA, SMMLS) */
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);

                        if (rd != 15) {
                            tmp = load_reg(s, rd);
                            if (insn & (1 << 6)) {
                                tmp64 = gen_subq_msw(tmp64, tmp);
                            } else {
                                tmp64 = gen_addq_msw(tmp64, tmp);
                            }
                        }
                        if (insn & (1 << 5)) {
                            tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                        }
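                        /*
                         * Adding 0x80000000 before discarding the low half
                         * rounds the result to nearest, giving the SMMULR /
                         * SMMLAR / SMMLSR forms.  For example a 64-bit value
                         * of 0x00000000_80000000 becomes 0x00000001_00000000,
                         * so the returned high word rounds up from 0 to 1.
                         */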
                        tcg_gen_shri_i64(tmp64, tmp64, 32);
                        tmp = tcg_temp_new_i32();
                        tcg_gen_extrl_i64_i32(tmp, tmp64);
                        tcg_temp_free_i64(tmp64);
                        store_reg(s, rn, tmp);
                        break;
                    case 0:
                    case 4:
                        /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
                        if (insn & (1 << 7)) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        if (insn & (1 << 5))
                            gen_swap_half(tmp2);
                        gen_smul_dual(tmp, tmp2);
                        if (insn & (1 << 22)) {
                            /* smlald, smlsld */
                            TCGv_i64 tmp64_2;

                            tmp64 = tcg_temp_new_i64();
                            tmp64_2 = tcg_temp_new_i64();
                            tcg_gen_ext_i32_i64(tmp64, tmp);
                            tcg_gen_ext_i32_i64(tmp64_2, tmp2);
                            tcg_temp_free_i32(tmp);
                            tcg_temp_free_i32(tmp2);
                            if (insn & (1 << 6)) {
                                tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
                            } else {
                                tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
                            }
                            tcg_temp_free_i64(tmp64_2);
                            gen_addq(s, tmp64, rd, rn);
                            gen_storeq_reg(s, rd, rn, tmp64);
                            tcg_temp_free_i64(tmp64);
                        } else {
                            /* smuad, smusd, smlad, smlsd */
                            if (insn & (1 << 6)) {
                                /* This subtraction cannot overflow. */
                                tcg_gen_sub_i32(tmp, tmp, tmp2);
                            } else {
                                /* This addition cannot overflow 32 bits;
                                 * however it may overflow considered as a
                                 * signed operation, in which case we must set
                                 * the Q flag.
                                 */
                                gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                            }
                            tcg_temp_free_i32(tmp2);
                            if (rd != 15) {
                                tmp2 = load_reg(s, rd);
                                gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            }
                            store_reg(s, rn, tmp);
                        }
                        break;
                    case 1:
                    case 3:
                        /* SDIV, UDIV */
                        if (!dc_isar_feature(arm_div, s)) {
                            goto illegal_op;
                        }
                        if (((insn >> 5) & 7) || (rd != 15)) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        if (insn & (1 << 21)) {
                            gen_helper_udiv(tmp, tmp, tmp2);
                        } else {
                            gen_helper_sdiv(tmp, tmp, tmp2);
                        }
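                        /*
                         * No explicit divide-by-zero check is generated:
                         * the udiv/sdiv helpers implement the architected
                         * behaviour of returning 0 for a zero divisor.
                         */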
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rn, tmp);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                case 3:
                    op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
                    switch (op1) {
                    case 0: /* Unsigned sum of absolute differences.  */
                        ARCH(6);
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        gen_helper_usad8(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        if (rd != 15) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        store_reg(s, rn, tmp);
                        break;
                    case 0x20: case 0x24: case 0x28: case 0x2c:
                        /* Bitfield insert/clear.  */
                        ARCH(6T2);
                        shift = (insn >> 7) & 0x1f;
                        i = (insn >> 16) & 0x1f;
                        if (i < shift) {
                            /* UNPREDICTABLE; we choose to UNDEF */
                            goto illegal_op;
                        }
                        i = i + 1 - shift;
                        if (rm == 15) {
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, 0);
                        } else {
                            tmp = load_reg(s, rm);
                        }
                        if (i != 32) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
                            tcg_temp_free_i32(tmp2);
                        }
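                        /*
                         * tcg_gen_deposit_i32(res, base, val, pos, len)
                         * writes the low 'len' bits of val into base at bit
                         * 'pos', which is exactly BFI; BFC is the rm == 15
                         * path above, depositing zeroes.  When i == 32 the
                         * insert covers the whole word, so a plain move of
                         * tmp suffices.
                         */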
                        store_reg(s, rd, tmp);
                        break;
                    case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                    case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                        ARCH(6T2);
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        i = ((insn >> 16) & 0x1f) + 1;
                        if (shift + i > 32)
                            goto illegal_op;
                        if (i < 32) {
                            if (op1 & 0x20) {
                                tcg_gen_extract_i32(tmp, tmp, shift, i);
                            } else {
                                tcg_gen_sextract_i32(tmp, tmp, shift, i);
                            }
                        }
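                        /*
                         * ubfx/sbfx map directly onto TCG's extract and
                         * sextract ops, which return bits [shift, shift + i)
                         * zero- or sign-extended.  When i == 32 the range
                         * check above forces shift == 0, so the field is the
                         * whole register and no extraction is needed.
                         */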
                        store_reg(s, rd, tmp);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                }
                break;
            }
        do_ldst:
            /* Check for undefined extension instructions
             * per the ARM Bible IE:
             * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
             */
            sh = (0xf << 20) | (0xf << 4);
            if (op1 == 0x7 && ((insn & sh) == sh)) {
                goto illegal_op;
            }
            /* load/store byte/word */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            tmp2 = load_reg(s, rn);
            if ((insn & 0x01200000) == 0x00200000) {
                /* ldrt/strt */
                i = get_a32_user_mem_index(s);
            } else {
                i = get_mem_index(s);
            }
            if (insn & (1 << 24))
                gen_add_data_offset(s, insn, tmp2);
            if (insn & (1 << 20)) {
                /* load */
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 22)) {
                    gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
                } else {
                    gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
                }
            } else {
                /* store */
                tmp = load_reg(s, rd);
                if (insn & (1 << 22)) {
                    gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
                } else {
                    gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
                }
                tcg_temp_free_i32(tmp);
            }
            if (!(insn & (1 << 24))) {
                gen_add_data_offset(s, insn, tmp2);
                store_reg(s, rn, tmp2);
            } else if (insn & (1 << 21)) {
                store_reg(s, rn, tmp2);
            } else {
                tcg_temp_free_i32(tmp2);
            }
            if (insn & (1 << 20)) {
                /* Complete the load.  */
                store_reg_from_load(s, rd, tmp);
            }
            break;
        case 0x08:
        case 0x09:
            {
                int j, n, loaded_base;
                bool exc_return = false;
                bool is_load = extract32(insn, 20, 1);
                bool user = false;
                TCGv_i32 loaded_var;
                /* load/store multiple words */
                /* XXX: store correct base if write back */
                if (insn & (1 << 22)) {
                    /* LDM (user), LDM (exception return) and STM (user) */
                    if (IS_USER(s))
                        goto illegal_op; /* only usable in supervisor mode */

                    if (is_load && extract32(insn, 15, 1)) {
                        exc_return = true;
                    } else {
                        user = true;
                    }
                }
                rn = (insn >> 16) & 0xf;
                addr = load_reg(s, rn);

                /* compute total size */
                loaded_base = 0;
                loaded_var = NULL;
                n = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        n++;
                }
                /* XXX: test invalid n == 0 case ? */
                if (insn & (1 << 23)) {
                    if (insn & (1 << 24)) {
                        /* pre increment */
                        tcg_gen_addi_i32(addr, addr, 4);
                    } else {
                        /* post increment */
                    }
                } else {
                    if (insn & (1 << 24)) {
                        /* pre decrement */
                        tcg_gen_addi_i32(addr, addr, -(n * 4));
                    } else {
                        /* post decrement */
                        if (n != 1)
                            tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                    }
                }
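                /*
                 * After this adjustment 'addr' points at the lowest word to
                 * be transferred.  For example ldmdb r0, {r1, r2, r3}
                 * (pre-decrement, n == 3) starts at r0 - 12, and the loop
                 * below then steps upwards a word at a time, matching the
                 * architected lowest-address-first transfer order.
                 */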
                j = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i)) {
                        if (is_load) {
                            /* load */
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            if (user) {
                                tmp2 = tcg_const_i32(i);
                                gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                                tcg_temp_free_i32(tmp2);
                                tcg_temp_free_i32(tmp);
                            } else if (i == rn) {
                                loaded_var = tmp;
                                loaded_base = 1;
                            } else if (i == 15 && exc_return) {
                                store_pc_exc_ret(s, tmp);
                            } else {
                                store_reg_from_load(s, i, tmp);
                            }
                        } else {
                            /* store */
                            if (i == 15) {
                                /* special case: r15 = PC + 8 */
                                val = (long)s->pc + 4;
                                tmp = tcg_temp_new_i32();
                                tcg_gen_movi_i32(tmp, val);
                            } else if (user) {
                                tmp = tcg_temp_new_i32();
                                tmp2 = tcg_const_i32(i);
                                gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                                tcg_temp_free_i32(tmp2);
                            } else {
                                tmp = load_reg(s, i);
                            }
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        j++;
                        /* no need to add after the last transfer */
                        if (j != n)
                            tcg_gen_addi_i32(addr, addr, 4);
                    }
                }
                if (insn & (1 << 21)) {
                    /* write back */
                    if (insn & (1 << 23)) {
                        if (insn & (1 << 24)) {
                            /* pre increment */
                        } else {
                            /* post increment */
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                    } else {
                        if (insn & (1 << 24)) {
                            /* pre decrement */
                            if (n != 1)
                                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                        } else {
                            /* post decrement */
                            tcg_gen_addi_i32(addr, addr, -(n * 4));
                        }
                    }
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if (exc_return) {
                    /* Restore CPSR from SPSR.  */
                    tmp = load_cpu_field(spsr);
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_start();
                    }
                    gen_helper_cpsr_write_eret(cpu_env, tmp);
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_end();
                    }
                    tcg_temp_free_i32(tmp);
                    /* Must exit loop to check un-masked IRQs */
                    s->base.is_jmp = DISAS_EXIT;
                }
            }
            break;
        case 0xa:
        case 0xb:
            {
                int32_t offset;

                /* branch (and link) */
                val = (int32_t)s->pc;
                if (insn & (1 << 24)) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, val);
                    store_reg(s, 14, tmp);
                }
                offset = sextract32(insn << 2, 0, 26);
                val += offset + 4;
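                /*
                 * sextract32(insn << 2, 0, 26) turns the 24-bit immediate
                 * into a sign-extended byte offset (imm24 * 4).  val is
                 * s->pc, which is already insn_addr + 4, so adding another
                 * 4 yields the architected base of insn_addr + 8: the
                 * target is insn_addr + 8 + SignExtend(imm24) * 4.
                 */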
                gen_jmp(s, val);
            }
            break;
        case 0xc:
        case 0xd:
        case 0xe:
            if (((insn >> 8) & 0xe) == 10) {
                /* VFP.  */
                if (disas_vfp_insn(s, insn)) {
                    goto illegal_op;
                }
            } else if (disas_coproc_insn(s, insn)) {
                /* Coprocessor.  */
                goto illegal_op;
            }
            break;
        case 0xf:
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 24);
            s->base.is_jmp = DISAS_SWI;
            break;
        default:
        illegal_op:
            gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                               default_exception_el(s));
            break;
        }
    }
}

static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
{
    /* Return true if this is a 16 bit instruction. We must be precise
     * about this (matching the decode).  We assume that s->pc still
     * points to the first 16 bits of the insn.
     */
    if ((insn >> 11) < 0x1d) {
        /* Definitely a 16-bit instruction */
        return true;
    }
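    /*
     * 0x1d is the top-five-bit pattern 0b11101: any halfword whose top
     * five bits are 0b11100 or lower (i.e. anything up to 0xe7ff) is a
     * complete 16-bit instruction, so only the 0b11101/0b11110/0b11111
     * prefixes need the closer look below.
     */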

    /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
     * first half of a 32-bit Thumb insn. Thumb-1 cores might
     * end up actually treating this as two 16-bit insns, though,
     * if it's half of a bl/blx pair that might span a page boundary.
     */
    if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Thumb2 cores (including all M profile ones) always treat
         * 32-bit insns as 32-bit.
         */
        return false;
    }

    if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
        /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
         * is not on the next page; we merge this into a 32-bit
         * insn.
         */
        return false;
    }
    /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
     * 0b1111_1xxx_xxxx_xxxx : BL suffix;
     * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
     * -- handle as single 16 bit insn
     */
    return true;
}

/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}
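/* Ops 0-7 form the logical group (and, bic, orr, orn, eor; 5-7 are not
 * allocated): for these, C comes from the shifter carry-out rather than
 * from an adder, which is what gen_thumb2_data_op() below implements via
 * gen_logic_CC() and gen_set_CF_bit31().
 */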

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */

static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
{
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        tcg_gen_and_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 1: /* bic */
        tcg_gen_andc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 2: /* orr */
        tcg_gen_or_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 3: /* orn */
        tcg_gen_orc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 4: /* eor */
        tcg_gen_xor_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 8: /* add */
        if (conds)
            gen_add_CC(t0, t0, t1);
        else
            tcg_gen_add_i32(t0, t0, t1);
        break;
    case 10: /* adc */
        if (conds)
            gen_adc_CC(t0, t0, t1);
        else
            gen_adc(t0, t1);
        break;
    case 11: /* sbc */
        if (conds) {
            gen_sbc_CC(t0, t0, t1);
        } else {
            gen_sub_carry(t0, t0, t1);
        }
        break;
    case 13: /* sub */
        if (conds)
            gen_sub_CC(t0, t0, t1);
        else
            tcg_gen_sub_i32(t0, t0, t1);
        break;
    case 14: /* rsb */
        if (conds)
            gen_sub_CC(t0, t1, t0);
        else
            tcg_gen_sub_i32(t0, t1, t0);
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        return 1;
    }
    if (logic_cc) {
        gen_logic_CC(t0);
        if (shifter_out)
            gen_set_CF_bit31(t1);
    }
    return 0;
}

/* Translate a 32-bit thumb instruction. */
static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
{
    uint32_t imm, shift, offset;
    uint32_t rd, rn, rm, rs;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 tmp3;
    TCGv_i32 addr;
    TCGv_i64 tmp64;
    int op;
    int shiftop;
    int conds;
    int logic_cc;

    /*
     * ARMv6-M supports a limited subset of Thumb2 instructions.
     * Other Thumb1 architectures allow only 32-bit
     * combined BL/BLX prefix and suffix.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_V7)) {
        int i;
        bool found = false;
        static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
                                               0xf3b08040 /* dsb */,
                                               0xf3b08050 /* dmb */,
                                               0xf3b08060 /* isb */,
                                               0xf3e08000 /* mrs */,
                                               0xf000d000 /* bl */};
        static const uint32_t armv6m_mask[] = {0xffe0d000,
                                               0xfff0d0f0,
                                               0xfff0d0f0,
                                               0xfff0d0f0,
                                               0xffe0d000,
                                               0xf800d000};
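        /*
         * Each armv6m_insn[i]/armv6m_mask[i] pair is a match pattern for
         * one permitted encoding; for example the final entry matches any
         * BL, since (insn & 0xf800d000) == 0xf000d000 holds for every
         * 32-bit BL.  Anything outside this list UNDEFs on ARMv6-M.
         */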

        for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
            if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
                found = true;
                break;
            }
        }
        if (!found) {
            goto illegal_op;
        }
    } else if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }

    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    rm = insn & 0xf;
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        abort();
    case 4:
        if (insn & (1 << 22)) {
            /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
             * - load/store doubleword, load/store exclusive, ldacq/strel,
             *   table branch, TT.
             */
            if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
                arm_dc_feature(s, ARM_FEATURE_V8)) {
                /* 0b1110_1001_0111_1111_1110_1001_0111_1111
                 * - SG (v8M only)
                 * The bulk of the behaviour for this instruction is implemented
                 * in v7m_handle_execute_nsc(), which deals with the insn when
                 * it is executed by a CPU in non-secure state from memory
                 * which is Secure & NonSecure-Callable.
                 * Here we only need to handle the remaining cases:
                 * * in NS memory (including the "security extension not
                 *   implemented" case) : NOP
                 * * in S memory but CPU already secure (clear IT bits)
                 * We know that the attribute for the memory this insn is
                 * in must match the current CPU state, because otherwise
                 * get_phys_addr_pmsav8 would have generated an exception.
                 */
                if (s->v8m_secure) {
                    /* Like the IT insn, we don't need to generate any code */
                    s->condexec_cond = 0;
                    s->condexec_mask = 0;
                }
            } else if (insn & 0x01200000) {
                /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
                 *  - load/store dual (post-indexed)
                 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
                 *  - load/store dual (literal and immediate)
                 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
                 *  - load/store dual (pre-indexed)
                 */
                bool wback = extract32(insn, 21, 1);

                if (rn == 15) {
                    if (insn & (1 << 21)) {
                        /* UNPREDICTABLE */
                        goto illegal_op;
                    }
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~3);
                } else {
                    addr = load_reg(s, rn);
                }
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0) {
                    offset = -offset;
                }

                if (s->v8m_stackcheck && rn == 13 && wback) {
                    /*
                     * Here 'addr' is the current SP; if offset is +ve we're
                     * moving SP up, else down. It is UNKNOWN whether the limit
                     * check triggers when SP starts below the limit and ends
                     * up above it; check whichever of the current and final
                     * SP is lower, so QEMU will trigger in that situation.
                     */
                    if ((int32_t)offset < 0) {
                        TCGv_i32 newsp = tcg_temp_new_i32();

                        tcg_gen_addi_i32(newsp, addr, offset);
                        gen_helper_v8m_stackcheck(cpu_env, newsp);
                        tcg_temp_free_i32(newsp);
                    } else {
                        gen_helper_v8m_stackcheck(cpu_env, addr);
                    }
                }

                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, offset);
                    offset = 0;
                }
                if (insn & (1 << 20)) {
                    /* ldrd */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    store_reg(s, rd, tmp);
                } else {
                    /* strd */
                    tmp = load_reg(s, rs);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                if (wback) {
                    /* Base writeback.  */
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            } else if ((insn & (1 << 23)) == 0) {
                /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
                 * - load/store exclusive word
                 * - TT (v8M only)
                 */
                if (rs == 15) {
                    if (!(insn & (1 << 20)) &&
                        arm_dc_feature(s, ARM_FEATURE_M) &&
                        arm_dc_feature(s, ARM_FEATURE_V8)) {
                        /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
                         *  - TT (v8M only)
                         */
                        bool alt = insn & (1 << 7);
                        TCGv_i32 addr, op, ttresp;

                        if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
                            /* we UNDEF for these UNPREDICTABLE cases */
                            goto illegal_op;
                        }

                        if (alt && !s->v8m_secure) {
                            goto illegal_op;
                        }

                        addr = load_reg(s, rn);
                        op = tcg_const_i32(extract32(insn, 6, 2));
                        ttresp = tcg_temp_new_i32();
                        gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
                        tcg_temp_free_i32(addr);
                        tcg_temp_free_i32(op);
                        store_reg(s, rd, ttresp);
                        break;
                    }
                    goto illegal_op;
                }
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                } else {
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                }
                tcg_temp_free_i32(addr);
            } else if ((insn & (7 << 5)) == 0) {
                /* Table Branch.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc);
                } else {
                    addr = load_reg(s, rn);
                }
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    /* tbh */
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                } else { /* tbb */
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                }
                tcg_temp_free_i32(addr);
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, s->pc);
                store_reg(s, 15, tmp);
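                /*
                 * The loaded table entry is a halfword count: it is doubled
                 * and added to s->pc, which for Thumb already reads as the
                 * address of this insn + 4, so a tbb entry of 3 branches
                 * 6 bytes past that point, as the architecture requires.
                 */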
            } else {
                bool is_lasr = false;
                bool is_ld = extract32(insn, 20, 1);
                int op2 = (insn >> 6) & 0x3;
                op = (insn >> 4) & 0x3;
                switch (op2) {
                case 0:
                    goto illegal_op;
                case 1:
                    /* Load/store exclusive byte/halfword/doubleword */
                    if (op == 2) {
                        goto illegal_op;
                    }
                    ARCH(7);
                    break;
                case 2:
                    /* Load-acquire/store-release */
                    if (op == 3) {
                        goto illegal_op;
                    }
                    /* Fall through */
                case 3:
                    /* Load-acquire/store-release exclusive */
                    ARCH(8);
                    is_lasr = true;
                    break;
                }

                if (is_lasr && !is_ld) {
                    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
                }

                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                if (!(op2 & 1)) {
                    if (is_ld) {
                        tmp = tcg_temp_new_i32();
                        switch (op) {
                        case 0: /* ldab */
                            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
                                              rs | ISSIsAcqRel);
                            break;
                        case 1: /* ldah */
                            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
                                               rs | ISSIsAcqRel);
                            break;
                        case 2: /* lda */
                            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
                                               rs | ISSIsAcqRel);
                            break;
                        default:
                            abort();
                        }
                        store_reg(s, rs, tmp);
                    } else {
                        tmp = load_reg(s, rs);
                        switch (op) {
                        case 0: /* stlb */
                            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
                                             rs | ISSIsAcqRel);
                            break;
                        case 1: /* stlh */
                            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
                                              rs | ISSIsAcqRel);
                            break;
                        case 2: /* stl */
                            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
                                              rs | ISSIsAcqRel);
                            break;
                        default:
                            abort();
                        }
                        tcg_temp_free_i32(tmp);
                    }
                } else if (is_ld) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                } else {
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                }
                tcg_temp_free_i32(addr);

                if (is_lasr && is_ld) {
                    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
                }
            }
        } else {
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* RFE, SRS: not available in user mode or on M profile */
                if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
                    goto illegal_op;
                }
                if (insn & (1 << 20)) {
                    /* rfe */
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2.  */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, -4);
                        }
                        store_reg(s, rn, addr);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                    gen_rfe(s, tmp, tmp2);
                } else {
                    /* srs */
                    gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
                            insn & (1 << 21));
                }
            } else {
                int i, loaded_base = 0;
                TCGv_i32 loaded_var;
                bool wback = extract32(insn, 21, 1);
                /* Load/store multiple.  */
                addr = load_reg(s, rn);
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }

                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                }

                if (s->v8m_stackcheck && rn == 13 && wback) {
                    /*
                     * If the writeback is incrementing SP rather than
                     * decrementing it, and the initial SP is below the
                     * stack limit but the final written-back SP would
                     * be above, then we must not perform any memory
                     * accesses, but it is IMPDEF whether we generate
                     * an exception. We choose to do so in this case.
                     * At this point 'addr' is the lowest address, so
                     * either the original SP (if incrementing) or our
                     * final SP (if decrementing), so that's what we check.
                     */
                    gen_helper_v8m_stackcheck(cpu_env, addr);
                }

                loaded_var = NULL;
                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                        continue;
                    if (insn & (1 << 20)) {
                        /* Load.  */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        if (i == 15) {
                            gen_bx_excret(s, tmp);
                        } else if (i == rn) {
                            loaded_var = tmp;
                            loaded_base = 1;
                        } else {
                            store_reg(s, i, tmp);
                        }
                    } else {
                        /* Store.  */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    tcg_gen_addi_i32(addr, addr, 4);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if (wback) {
                    /* Base register writeback.  */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    }
                    /* Fault if writeback register is in register list.  */
                    if (insn & (1 << rn))
                        goto illegal_op;
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    case 5:
        op = (insn >> 21) & 0xf;
        if (op == 6) {
            if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                goto illegal_op;
            }
            /* Halfword pack.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
            if (insn & (1 << 5)) {
                /* pkhtb */
                if (shift == 0)
                    shift = 31;
                tcg_gen_sari_i32(tmp2, tmp2, shift);
                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                tcg_gen_ext16u_i32(tmp2, tmp2);
            } else {
                /* pkhbt */
                if (shift)
                    tcg_gen_shli_i32(tmp2, tmp2, shift);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
            }
            tcg_gen_or_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* Data processing register constant shift.  */
            if (rn == 15) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            } else {
                tmp = load_reg(s, rn);
            }
            tmp2 = load_reg(s, rm);

            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
                goto illegal_op;
            tcg_temp_free_i32(tmp2);
            if (rd == 13 &&
                ((op == 2 && rn == 15) ||
                 (op == 8 && rn == 13) ||
                 (op == 13 && rn == 13))) {
                /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
                store_sp_checked(s, tmp);
            } else if (rd != 15) {
                store_reg(s, rd, tmp);
            } else {
                tcg_temp_free_i32(tmp);
            }
        }
        break;
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            /*
             * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
             *  - MOV, MOVS (register-shifted register), flagsetting
             */
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            if (logic_cc)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
            break;
        case 1: /* Sign/zero extend.  */
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: /* SXTAH, SXTH */
            case 1: /* UXTAH, UXTH */
            case 4: /* SXTAB, SXTB */
            case 5: /* UXTAB, UXTB */
                break;
            case 2: /* SXTAB16, SXTB16 */
            case 3: /* UXTAB16, UXTB16 */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
                break;
            default:
                goto illegal_op;
            }
            if (rn != 15) {
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
            }
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
            if (shift != 0)
                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default:
                g_assert_not_reached();
            }
            if (rn != 15) {
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 2: /* SIMD add/subtract.  */
            if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                goto illegal_op;
            }
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            if (op < 4) {
                /* Saturating add/subtract.  */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if (op & 1)
                    gen_helper_double_saturate(tmp, cpu_env, tmp);
                if (op & 2)
                    gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
                else
                    gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                switch (op) {
                case 0x0a: /* rbit */
                case 0x08: /* rev */
                case 0x09: /* rev16 */
                case 0x0b: /* revsh */
                case 0x18: /* clz */
                    break;
                case 0x10: /* sel */
                    if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                        goto illegal_op;
                    }
                    break;
                case 0x20: /* crc32/crc32c */
                case 0x21:
                case 0x22:
                case 0x28:
                case 0x29:
                case 0x2a:
                    if (!dc_isar_feature(aa32_crc32, s)) {
                        goto illegal_op;
                    }
                    break;
                default:
                    goto illegal_op;
                }
                tmp = load_reg(s, rn);
                switch (op) {
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                    break;
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                    break;
                case 0x09: /* rev16 */
                    gen_rev16(tmp);
                    break;
                case 0x0b: /* revsh */
                    gen_revsh(tmp);
                    break;
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    break;
                case 0x18: /* clz */
                    tcg_gen_clzi_i32(tmp, tmp, 32);
                    break;
                case 0x20:
                case 0x21:
                case 0x22:
                case 0x28:
                case 0x29:
                case 0x2a:
                {
                    /* crc32/crc32c */
                    uint32_t sz = op & 0x3;
                    uint32_t c = op & 0x8;

                    tmp2 = load_reg(s, rm);
                    if (sz == 0) {
                        tcg_gen_andi_i32(tmp2, tmp2, 0xff);
                    } else if (sz == 1) {
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
                    }
                    tmp3 = tcg_const_i32(1 << sz);
                    if (c) {
                        gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
                    } else {
                        gen_helper_crc32(tmp, tmp, tmp2, tmp3);
                    }
                    tcg_temp_free_i32(tmp2);
                    tcg_temp_free_i32(tmp3);
                    break;
                }
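                /*
                 * The operand is masked down to 1 << sz bytes and the byte
                 * count passed to the helper; gen_helper_crc32 uses the
                 * IEEE 802.3 polynomial and gen_helper_crc32c the
                 * Castagnoli polynomial, selected by bit 3 of op.
                 */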
                default:
                    g_assert_not_reached();
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
            case 7: /* Unsigned sum of absolute differences.  */
                break;
            case 1: /* 16 x 16 -> 32 */
            case 2: /* Dual multiply add.  */
            case 3: /* 32 * 16 -> 32msb */
            case 4: /* Dual multiply subtract.  */
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
                break;
            }
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (op)
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    else
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                if (op)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (insn & (1 << 22)) {
                    /* This subtraction cannot overflow. */
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 3: /* 32 * 16 -> 32msb */
                if (op)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                if (rs != 15) {
                    tmp = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    } else {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                    }
                }
                if (insn & (1 << 4)) {
                    tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                }
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                break;
            case 7: /* Unsigned sum of absolute differences.  */
                gen_helper_usad8(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!dc_isar_feature(thumb_div, s)) {
                    goto illegal_op;
                }
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    tcg_temp_free_i32(tmp);
                    tcg_temp_free_i32(tmp2);
                    goto illegal_op;
                }
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                /* BUGFIX */
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_i32(tmp);
                            goto illegal_op;
                        }
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        tcg_temp_free_i32(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                        tcg_temp_free_i64(tmp64);
                        goto illegal_op;
                    }
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            }
            break;
        }
        break;
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
            if (extract32(insn, 24, 2) == 3) {
                goto illegal_op; /* op0 = 0b11 : unallocated */
            }

            /*
             * Decode VLLDM and VLSTM first: these are nonstandard because:
             *  * if there is no FPU then these insns must NOP in
             *    Secure state and UNDEF in Nonsecure state
             *  * if there is an FPU then these insns do not have
             *    the usual behaviour that disas_vfp_insn() provides of
             *    being controlled by CPACR/NSACR enable bits or the
             *    lazy-stacking logic.
             */
            if (arm_dc_feature(s, ARM_FEATURE_V8) &&
                (insn & 0xffa00f00) == 0xec200a00) {
                /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
                 * - VLLDM, VLSTM
                 * We choose to UNDEF if the RAZ bits are non-zero.
                 */
                if (!s->v8m_secure || (insn & 0x0040f0ff)) {
                    goto illegal_op;
                }

                if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
                    TCGv_i32 fptr = load_reg(s, rn);

                    if (extract32(insn, 20, 1)) {
                        gen_helper_v7m_vlldm(cpu_env, fptr);
                    } else {
                        gen_helper_v7m_vlstm(cpu_env, fptr);
                    }
                    tcg_temp_free_i32(fptr);

                    /* End the TB, because we have updated FP control bits */
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
            }
            if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
                ((insn >> 8) & 0xe) == 10) {
                /* FP, and the CPU supports it */
                if (disas_vfp_insn(s, insn)) {
                    goto illegal_op;
                }
                break;
            }

            /* All other insns: NOCP */
            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
                               default_exception_el(s));
            break;
        }
        if ((insn & 0xfe000a00) == 0xfc000800
            && arm_dc_feature(s, ARM_FEATURE_V8)) {
            /* The Thumb2 and ARM encodings are identical.  */
            if (disas_neon_insn_3same_ext(s, insn)) {
                goto illegal_op;
            }
        } else if ((insn & 0xff000a00) == 0xfe000800
                   && arm_dc_feature(s, ARM_FEATURE_V8)) {
            /* The Thumb2 and ARM encodings are identical.  */
            if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
                goto illegal_op;
            }
        } else if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(s, insn)) {
                goto illegal_op;
            }
        } else if (((insn >> 8) & 0xe) == 10) {
            if (disas_vfp_insn(s, insn)) {
                goto illegal_op;
            }
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn(s, insn)) {
                goto illegal_op;
            }
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
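                /*
                 * These two XORs implement I1 = NOT(J1 EOR S) and
                 * I2 = NOT(J2 EOR S) from the BL/BLX encoding: bits 23 and
                 * 22 of offset were pre-loaded with copies of the sign bit
                 * by the sign extension above, so flipping each one exactly
                 * when J1/J2 (insn bits 13 and 11) are clear recovers I1:I2.
                 */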

                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    if (arm_dc_feature(s, ARM_FEATURE_M)) {
                        goto illegal_op;
                    }
                    if (!(insn & (1 << 20))) {
                        /* Hypervisor call (v7) */
                        int imm16 = extract32(insn, 16, 4) << 12
                            | extract32(insn, 0, 12);
                        ARCH(7);
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        gen_hvc(s, imm16);
                    } else {
                        /* Secure monitor call (v6+) */
                        ARCH(6K);
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        gen_smc(s);
                    }
                } else {
                    op = (insn >> 20) & 7;
                    switch (op) {
                    case 0: /* msr cpsr.  */
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            tmp = load_reg(s, rn);
                            /* the constant is the mask and SYSm fields */
                            addr = tcg_const_i32(insn & 0xfff);
                            gen_helper_v7m_msr(cpu_env, addr, tmp);
                            tcg_temp_free_i32(addr);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        }
                        /* fall through */
                    case 1: /* msr spsr.  */
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }

                        if (extract32(insn, 5, 1)) {
                            /* MSR (banked) */
                            int sysm = extract32(insn, 8, 4) |
                                (extract32(insn, 4, 1) << 4);
                            int r = op & 1;

                            gen_msr_banked(s, r, sysm, rm);
                            break;
                        }

                        /* MSR (for PSRs) */
                        tmp = load_reg(s, rn);
                        if (gen_set_psr(s,
                                        msr_mask(s, (insn >> 8) & 0xf, op == 1),
                                        op == 1, tmp))
                            goto illegal_op;
                        break;
                    case 2: /* cps, nop-hint.  */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                        }
                        /* Implemented as NOP in user mode.  */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_set_psr_im(s, offset, 0, imm);
                        }
                        break;
                    case 3: /* Special control operations.  */
                        if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
                            !arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }
                        op = (insn >> 4) & 0xf;
                        switch (op) {
                        case 2: /* clrex */
                            gen_clrex(s);
                            break;
                        case 4: /* dsb */
                        case 5: /* dmb */
                            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
                            break;
                        case 6: /* isb */
                            /* We need to break the TB after this insn
                             * to execute self-modifying code correctly
                             * and also to take any pending interrupts
                             * immediately.
                             */
                            gen_goto_tb(s, 0, s->pc & ~1);
                            break;
                        case 7: /* sb */
                            if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
                                goto illegal_op;
                            }
                            /*
                             * TODO: There is no speculation barrier opcode
                             * for TCG; MB and end the TB instead.
                             */
                            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
                            gen_goto_tb(s, 0, s->pc & ~1);
                            break;
                        default:
                            goto illegal_op;
                        }
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.
                         * This instruction doesn't exist at all for M-profile.
                         */
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rn);
                        gen_bx(s, tmp);
                        break;
                    case 5: /* Exception return.  */
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        if (rn != 14 || rd != 15) {
                            goto illegal_op;
                        }
                        if (s->current_el == 2) {
                            /* ERET from Hyp uses ELR_Hyp, not LR */
                            if (insn & 0xff) {
                                goto illegal_op;
                            }
                            tmp = load_cpu_field(elr_el[2]);
                        } else {
                            tmp = load_reg(s, rn);
                            tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                        }
                        gen_exception_return(s, tmp);
                        break;
                    case 6: /* MRS */
                        if (extract32(insn, 5, 1) &&
                            !arm_dc_feature(s, ARM_FEATURE_M)) {
                            /* MRS (banked) */
                            int sysm = extract32(insn, 16, 4) |
                                (extract32(insn, 4, 1) << 4);

                            gen_mrs_banked(s, 0, sysm, rd);
                            break;
                        }

                        if (extract32(insn, 16, 4) != 0xf) {
                            goto illegal_op;
                        }
                        if (!arm_dc_feature(s, ARM_FEATURE_M) &&
                            extract32(insn, 0, 8) != 0) {
                            goto illegal_op;
                        }

                        /* mrs cpsr */
                        tmp = tcg_temp_new_i32();
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_mrs(tmp, cpu_env, addr);
                            tcg_temp_free_i32(addr);
                        } else {
                            gen_helper_cpsr_read(tmp, cpu_env);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 7: /* MRS */
                        if (extract32(insn, 5, 1) &&
                            !arm_dc_feature(s, ARM_FEATURE_M)) {
                            /* MRS (banked) */
                            int sysm = extract32(insn, 16, 4) |
                                (extract32(insn, 4, 1) << 4);

                            gen_mrs_banked(s, 1, sysm, rd);
                            break;
                        }

                        /* mrs spsr.  */
                        /* Not accessible in user mode.  */
                        if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }

                        if (extract32(insn, 16, 4) != 0xf ||
                            extract32(insn, 0, 8) != 0) {
                            goto illegal_op;
                        }

                        tmp = load_cpu_field(spsr);
                        store_reg(s, rd, tmp);
                        break;
                    }
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                arm_skip_unless(s, op);

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;
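                /*
                 * Worked example (added for illustration): for the T3
                 * encoding BEQ.W +0x1000, i.e. insn = 0xf0018000
                 * (cond = 0, S = 0, J1 = J2 = 0, imm6 = 1, imm11 = 0),
                 * only the imm6 term contributes, giving
                 * offset = 0x00010000 >> 4 = 0x1000.
                 */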

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            }
        } else {
            /*
             * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
             *  - Data-processing (modified immediate, plain binary immediate)
             */
            if (insn & (1 << 25)) {
                /*
                 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
                 *  - Data-processing (plain binary immediate)
                 */
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
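                    /*
                     * Field note (added for illustration): 'shift' is the
                     * bit position lsb (imm3:imm2 reassembled) and 'imm'
                     * the raw width or saturate field.  E.g. for
                     * UBFX r0, r1, #8, #4 (lsb 8, width 4) we get
                     * shift = 8 and imm = 3 (widthm1), which the extract
                     * case below turns into
                     * tcg_gen_extract_i32(tmp, tmp, 8, 4).
                     */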
                    if (rn == 15) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32) {
                            tcg_gen_sextract_i32(tmp, tmp, shift, imm);
                        }
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32) {
                            tcg_gen_extract_i32(tmp, tmp, shift, imm);
                        }
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
                            tcg_temp_free_i32(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0) {
                                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                                    tcg_temp_free_i32(tmp);
                                    tcg_temp_free_i32(tmp2);
                                    goto illegal_op;
                                }
                                gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                            }
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0) {
                                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                                    tcg_temp_free_i32(tmp);
                                    tcg_temp_free_i32(tmp2);
                                    goto illegal_op;
                                }
                                gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                            }
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                        store_reg(s, rd, tmp);
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                            store_reg(s, rd, tmp);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                            if (rn == 13 && rd == 13) {
                                /* ADD SP, SP, imm or SUB SP, SP, imm */
                                store_sp_checked(s, tmp);
                            } else {
                                store_reg(s, rd, tmp);
                            }
                        }
                    }
                }
            } else {
                /*
                 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
                 *  - Data-processing (modified immediate)
                 */
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm |= imm << 16;
                    imm <<= 8;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
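                /*
                 * Worked example (added for illustration), with imm8 = 0xab:
                 *   mode 0 -> 0x000000ab      mode 1 -> 0x00ab00ab
                 *   mode 2 -> 0xab00ab00      mode 3 -> 0xabababab
                 * For the rotated case, i:imm3 = 4 and imm8 = 0xab give
                 * shift = (4 << 1) | 1 = 9, so imm = 0xab << 23 =
                 * 0x55800000, i.e. ror(0xab, 9) as the architecture
                 * specifies (the encoding guarantees shift >= 8, so the
                 * left shift cannot wrap).
                 */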
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd == 13 && rn == 13
                    && (op == 8 || op == 13)) {
                    /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
                    store_sp_checked(s, tmp);
                } else if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
        }
        break;
    case 12: /* Load/store single data item.  */
    {
        int postinc = 0;
        int writeback = 0;
        int memidx;
        ISSInfo issinfo;

        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(s, insn)) {
                goto illegal_op;
            }
            break;
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (rs == 15) {
            if (!(insn & (1 << 20))) {
                goto illegal_op;
            }
            if (op != 2) {
                /* Byte or halfword load space with dest == r15 : memory hints.
                 * Catch them early so we don't emit pointless addressing code.
                 * This space is a mix of:
                 *  PLD/PLDW/PLI,  which we implement as NOPs (note that unlike
                 *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
                 *     cores)
                 *  unallocated hints, which must be treated as NOPs
                 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
                 *     which is easiest for the decoding logic
                 *  Some space which must UNDEF
                 */
                int op1 = (insn >> 23) & 3;
                int op2 = (insn >> 6) & 0x3f;
                if (op & 2) {
                    goto illegal_op;
                }
                if (rn == 15) {
                    /* UNPREDICTABLE, unallocated hint or
                     * PLD/PLDW/PLI (literal)
                     */
                    return;
                }
                if (op1 & 1) {
                    return; /* PLD/PLDW/PLI or unallocated hint */
                }
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return; /* PLD/PLDW/PLI or unallocated hint */
                }
                /* UNDEF space, or an UNPREDICTABLE */
                goto illegal_op;
            }
        }
        memidx = get_mem_index(s);
        if (rn == 15) {
            addr = tcg_temp_new_i32();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                imm = insn & 0xff;
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3) {
                        tcg_temp_free_i32(addr);
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    break;
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    memidx = get_a32_user_mem_index(s);
                    break;
                case 0x9: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xb: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 0xd: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xf: /* Pre-increment.  */
                    writeback = 1;
                    break;
                default:
                    tcg_temp_free_i32(addr);
                    goto illegal_op;
                }
            }
        }
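        /*
         * Addressing-mode note (added for illustration): for a
         * post-indexed access such as LDR r0, [r1], #4 the nibble is
         * 0xb, so postinc = writeback = 1 and 'addr' is left pointing
         * at the unmodified base; the base register is updated by +4
         * only after the access, in the postinc code below.
         */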

        issinfo = writeback ? ISSInvalid : rs;

        if (s->v8m_stackcheck && rn == 13 && writeback) {
            /*
             * Stackcheck. Here we know 'addr' is the current SP;
             * if imm is +ve we're moving SP up, else down. It is
             * UNKNOWN whether the limit check triggers when SP starts
             * below the limit and ends up above it; we chose to do so.
             */
            if ((int32_t)imm < 0) {
                TCGv_i32 newsp = tcg_temp_new_i32();

                tcg_gen_addi_i32(newsp, addr, imm);
                gen_helper_v8m_stackcheck(cpu_env, newsp);
                tcg_temp_free_i32(newsp);
            } else {
                gen_helper_v8m_stackcheck(cpu_env, addr);
            }
        }
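        /*
         * Example (added for illustration): STR r0, [sp, #-8]! has
         * rn == 13, writeback set and imm == -8, so the limit check
         * above is performed on newsp = SP - 8, i.e. on the lowest
         * address the stack pointer will reach.
         */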

        if (writeback && !postinc) {
            tcg_gen_addi_i32(addr, addr, imm);
        }

        if (insn & (1 << 20)) {
            /* Load.  */
            tmp = tcg_temp_new_i32();
            switch (op) {
            case 0:
                gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 4:
                gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 1:
                gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 5:
                gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 2:
                gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
                break;
            default:
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            if (rs == 15) {
                gen_bx_excret(s, tmp);
            } else {
                store_reg(s, rs, tmp);
            }
        } else {
            /* Store.  */
            tmp = load_reg(s, rs);
            switch (op) {
            case 0:
                gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 1:
                gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 2:
                gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
                break;
            default:
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            tcg_temp_free_i32(tmp);
        }
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
    }
    break;
    default:
        goto illegal_op;
    }
    return;
illegal_op:
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static void disas_thumb_insn(DisasContext *s, uint32_t insn)
{
    uint32_t val, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 addr;

    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /*
             * 0b0001_1xxx_xxxx_xxxx
             *  - Add, subtract (three low registers)
             *  - Add, subtract (two low registers and immediate)
             */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /*
         * 0b001x_xxxx_xxxx_xxxx
         *  - Add, subtract, compare, move (one low register and immediate)
         */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
                               rd | ISSIs16Bit);
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* 0b0100_01xx_xxxx_xxxx
             * - data processing extended, branch and exchange
             */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rd == 13) {
                    /* ADD SP, SP, reg */
                    store_sp_checked(s, tmp);
                } else {
                    store_reg(s, rd, tmp);
                }
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                if (rd == 13) {
                    /* MOV SP, reg */
                    store_sp_checked(s, tmp);
                } else {
                    store_reg(s, rd, tmp);
                }
                break;
            case 3:
            {
                /* 0b0100_0111_xxxx_xxxx
                 * - branch [and link] exchange thumb register
                 */
                bool link = insn & (1 << 7);

                if (insn & 3) {
                    goto undef;
                }
                if (link) {
                    ARCH(5);
                }
                if ((insn & 4)) {
                    /* BXNS/BLXNS: only exists for v8M with the
                     * security extensions, and always UNDEF if NonSecure.
                     * We don't implement these in the user-only mode
                     * either (in theory you can use them from Secure User
                     * mode but they are too tied in to system emulation.)
                     */
                    if (!s->v8m_secure || IS_USER_ONLY) {
                        goto undef;
                    }
                    if (link) {
                        gen_blxns(s, rm);
                    } else {
                        gen_bxns(s, rm);
                    }
                    break;
                }
                /* BLX/BX */
                tmp = load_reg(s, rm);
                if (link) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                    gen_bx(s, tmp);
                } else {
                    /* Only BX works as exception-return, not BLX */
                    gen_bx_excret(s, tmp);
                }
                break;
            }
            }
            break;
        }

        /*
         * 0b0100_00xx_xxxx_xxxx
         *  - Data-processing (two low registers)
         */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

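        /*
         * Note (added for illustration): for the shift/rotate ops
         * (2, 3, 4, 7) the 16-bit encoding shifts Rdn by Rm, so after
         * the swap above 'tmp' (loaded from rd) holds the shift amount
         * and 'tmp2' (loaded from rm) the value being shifted; val == 1
         * records that the result lives in tmp2 and must be written
         * back to the original Rdn.
         */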
        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            tmp = NULL;
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;

    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
        } else {
            tmp = tcg_temp_new_i32();
        }

        switch (op) {
        case 0: /* str */
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 1: /* strh */
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 2: /* strb */
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 3: /* ldrsb */
            gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 4: /* ldr */
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 5: /* ldrh */
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 6: /* ldrb */
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 7: /* ldrsh */
            gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        }
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
        } else {
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /*
         * 0b1010_xxxx_xxxx_xxxx
         *  - Add PC/SP (immediate)
         */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
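        /*
         * Example (added for illustration): ADD r0, pc, #8 at address A
         * computes Align(A + 4, 4) + 8 (s->pc here is already A + 2,
         * and bit 1 of the PC value is cleared before the add).
         */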
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /*
             * 0b1011_0000_xxxx_xxxx
             *  - ADD (SP plus immediate)
             *  - SUB (SP minus immediate)
             */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_sp_checked(s, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /*
             * 0b1011_x10x_xxxx_xxxx
             *  - push/pop
             */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
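            /*
             * Example (added for illustration): PUSH {r0-r2, lr} has
             * four registers in the transfer, so offset = 16 and the
             * loop below stores ascending from SP - 16; a POP instead
             * leaves 'addr' at the current SP.
             */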

            if (s->v8m_stackcheck) {
                /*
                 * Here 'addr' is the lower of "old SP" and "new SP";
                 * if this is a pop that starts below the limit and ends
                 * above it, it is UNKNOWN whether the limit check triggers;
                 * we choose to trigger.
                 */
                gen_helper_v8m_stackcheck(cpu_env, addr);
            }

            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            tmp = NULL;
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
            }
            break;

        case 1: case 3: case 9: case 11: /* cbz/cbnz */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            arm_gen_condlabel(s);
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
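            /*
             * Example (added for illustration): 0xb108 is CBZ r0 with
             * i:imm5 = 1, giving offset = 2 and a taken-branch target
             * of s->pc + 2 + 2, i.e. the insn address plus 6.
             */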
            break;

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
            break;
        }

        case 0xa: /* rev, and hlt */
        {
            int op1 = extract32(insn, 6, 2);

            if (op1 == 2) {
                /* HLT */
                int imm6 = extract32(insn, 0, 6);

                gen_hlt(s, imm6);
                break;
            }

            /* Otherwise this is rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch (op1) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default:
                g_assert_not_reached();
            }
            store_reg(s, rd, tmp);
            break;
        }

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
                    gen_helper_setend(cpu_env);
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var = NULL;
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->base.is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        arm_skip_unless(s, cond);

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
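        /*
         * Example (added for illustration): 0xd0fe is BEQ with
         * imm8 = 0xfe (offset = -2), so val = s->pc + 2 - 4: a
         * conditional branch back to the instruction itself.
         */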
        break;

    case 14:
        if (insn & (1 << 11)) {
            /* thumb_insn_is_16bit() ensures we can't get here for
             * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
             * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
             */
            assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
            ARCH(5);
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        /* thumb_insn_is_16bit() ensures we can't get here for
         * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
         */
        assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));

        if (insn & (1 << 11)) {
            /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
        } else {
            /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
            uint32_t uoffset = ((int32_t)insn << 21) >> 9;

            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
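            /*
             * Note (added for illustration): this prefix leaves
             * PC + SignExtend(imm11 << 12) in LR; the matching BL/BLX
             * suffix (handled above, or in case 14) then adds its own
             * imm11 << 1, branches there, and replaces LR with the
             * return address (the insn after the pair, bit 0 set).
             */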
        }
        break;
    }
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->pc might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * We know this is a Thumb insn, and our caller ensures we are
     * only called if dc->pc is less than 4 bytes from the page
     * boundary, so we cross the page if the first 16 bits indicate
     * that this is a 32 bit insn.
     */
    uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);

    return !thumb_insn_is_16bit(s, insn);
}

static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = env_archcpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    uint32_t condexec, core_mmu_idx;

    dc->isar = &cpu->isar;
    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
    dc->condexec_mask = (condexec & 0xf) << 1;
    dc->condexec_cond = condexec >> 4;
    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
    dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
    dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
        dc->vec_stride = 0;
    } else {
        dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
        dc->c15_cpar = 0;
    }
    dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
    dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
    dc->v7m_new_fp_ctxt_needed =
        FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
    dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1.  */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }
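    /*
     * Worked example (added for illustration), with 4K pages: if
     * pc_first is page_base + 0xf00, pc_first | TARGET_PAGE_MASK is
     * the sign-extended value -0x100, so the bound computed above is
     * 0x100 / 4 = 64 ARM insns left on the page.
     */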

    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
}

static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block.  So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now.  This avoids complications trying
     * to do it at the end of the block.  (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn().  The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}

static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->pc += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}

static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         * a) we just took an exception to an EL which is being debugged
         *    and this is the first insn in the exception handler
         * b) debug exceptions were masked and we just unmasked them
         *    without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}

static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}

static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
    dc->insn = insn;
    dc->pc += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA.  We performed the cross-page check
       in init_disas_context by adjusting max_insns.  */
}

static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /* Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /* HLT: v8A only. This is unconditional even when it is going to
         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
         * For v7 cores this was a plain old undefined encoding and so
         * honours its cc check. (We might be using the encoding as
         * a semihosting trap, but we don't change the cc check behaviour
         * on that account, because a debugger connected to a real v7A
         * core and emulating semihosting traps by catching the UNDEF
         * exception would also only see cases where the cc check passed.
         * No guest code should be trying to do a HLT semihosting trap
         * in an IT block anyway.
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}

static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
58803318 12438 dc->insn = insn;
296e5a0a 12439
dcf14dfb 12440 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
296e5a0a
PM
12441 uint32_t cond = dc->condexec_cond;
12442
12443 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
c2d9644e 12444 arm_skip_unless(dc, cond);
296e5a0a
PM
12445 }
12446 }
12447
12448 if (is_16bit) {
12449 disas_thumb_insn(dc, insn);
12450 } else {
2eea841c 12451 disas_thumb2_insn(dc, insn);
296e5a0a 12452 }
722ef0a5
RH
12453
12454 /* Advance the Thumb condexec condition. */
12455 if (dc->condexec_mask) {
12456 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
12457 ((dc->condexec_mask >> 4) & 1));
12458 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
12459 if (dc->condexec_mask == 0) {
12460 dc->condexec_cond = 0;
12461 }
12462 }
12463
d0264d86
RH
12464 arm_post_translate_insn(dc);
12465
12466 /* Thumb is a variable-length ISA. Stop translation when the next insn
12467 * will touch a new page. This ensures that prefetch aborts occur at
12468 * the right place.
12469 *
12470 * We want to stop the TB if the next insn starts in a new page,
12471 * or if it spans between this page and the next. This means that
12472 * if we're looking at the last halfword in the page we need to
12473 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12474 * or a 32-bit Thumb insn (which won't).
12475 * This is to avoid generating a silly TB with a single 16-bit insn
12476 * in it at the end of this page (which would execute correctly
12477 * but isn't very efficient).
12478 */
12479 if (dc->base.is_jmp == DISAS_NEXT
bfe7ad5b
EC
12480 && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
12481 || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
d0264d86
RH
12482 && insn_crosses_page(env, dc)))) {
12483 dc->base.is_jmp = DISAS_TOO_MANY;
12484 }
722ef0a5
RH
12485}
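
/*
 * Illustrative sketch (standalone mirror of the condexec advance above):
 * ITSTATE is tracked as a 4-bit base condition plus a 5-bit mask.  After
 * each insn in the IT block the mask shifts left one place and its top
 * bit refreshes the low bit of the condition; once the mask empties, the
 * IT block is over and the condition resets to 0.
 */
static void advance_itstate_sketch(uint32_t *cond, uint32_t *mask)
{
    *cond = (*cond & 0xe) | ((*mask >> 4) & 1);
    *mask = (*mask << 1) & 0x1f;
    if (*mask == 0) {
        *cond = 0; /* IT block complete */
    }
}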

static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align dc->base.pc_next.  */
    dc->base.pc_next = dc->pc;
}
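
/*
 * Illustrative sketch (hypothetical helper): the insn-length computation
 * fed to gen_helper_wfi() in the DISAS_WFI case above.  The helper needs
 * the length so the PC can be backed up to point at the WFI again, for
 * example when the insn traps to a higher exception level.  A 32-bit
 * Thumb insn was assembled earlier as hw1 << 16 | hw2, so a clear bit 31
 * identifies a 16-bit insn, whose top halfword is zero.
 */
static int wfi_insn_len_sketch(bool thumb, uint32_t insn)
{
    return (thumb && !(insn & (1U << 31))) ? 2 : 4;
}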

static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
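
/*
 * Illustrative sketch (greatly simplified; not the real translator_loop()
 * in accel/tcg): the order in which the hooks registered above run for
 * one translation block.  Breakpoint checks, icount handling and
 * code-buffer bounds checks are omitted.
 */
static void translator_loop_sketch(const TranslatorOps *ops,
                                   DisasContextBase *db, CPUState *cpu)
{
    ops->init_disas_context(db, cpu);
    ops->tb_start(db, cpu);
    do {
        db->num_insns++;
        ops->insn_start(db, cpu);
        ops->translate_insn(db, cpu); /* may set db->is_jmp to stop */
    } while (db->is_jmp == DISAS_NEXT && db->num_insns < db->max_insns);
    ops->tb_stop(db, cpu);
}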

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    const TranslatorOps *ops = &arm_translator_ops;

    if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb, max_insns);
}

void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                     xpsr,
                     xpsr & XPSR_N ? 'N' : '-',
                     xpsr & XPSR_Z ? 'Z' : '-',
                     xpsr & XPSR_C ? 'C' : '-',
                     xpsr & XPSR_V ? 'V' : '-',
                     xpsr & XPSR_T ? 'T' : 'A',
                     ns_status,
                     mode);
    } else {
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                     psr,
                     psr & CPSR_N ? 'N' : '-',
                     psr & CPSR_Z ? 'Z' : '-',
                     psr & CPSR_C ? 'C' : '-',
                     psr & CPSR_V ? 'V' : '-',
                     psr & CPSR_T ? 'T' : 'A',
                     ns_status,
                     aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                         i * 2, (uint32_t)v,
                         i * 2 + 1, (uint32_t)(v >> 32),
                         i, v);
        }
        qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
    }
}
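
/*
 * Illustrative sketch (hypothetical helper): the register aliasing behind
 * the FPU dump above.  In the AArch32 VFP view, s[2n] is the low half and
 * s[2n + 1] the high half of d[n], which is exactly how the three values
 * printed per line relate to one another.
 */
static void vfp_dreg_halves_sketch(uint64_t d, uint32_t *s_even,
                                   uint32_t *s_odd)
{
    *s_even = (uint32_t)d;         /* s[2n]     */
    *s_odd  = (uint32_t)(d >> 32); /* s[2n + 1] */
}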

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}
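
/*
 * Illustrative sketch (assumed shape; cf. arm_tr_insn_start()): the
 * producer side of the three insn-start words consumed above.  Word 0 is
 * the PC, word 1 the packed condexec bits (restored as zero for AArch64,
 * where they are unused), and word 2 a syndrome template (0 by default,
 * filled in later for insns that need one) stored right-shifted by
 * ARM_INSN_START_WORD2_SHIFT so that it packs alongside the other data.
 */
static void insn_start_words_sketch(DisasContext *dc)
{
    tcg_gen_insn_start(dc->pc,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
}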