/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "arm_ldst.h"
#include "hw/semihosting/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}

static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}

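/* Load the CPUARMState field at the given byte offset into a new temporary. */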
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

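/* Write var to the CPSR, updating only the bits selected by mask. */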
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

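/* Raise an exception that is internal to QEMU (e.g. EXCP_DEBUG); internal
 * exceptions carry no architectural syndrome or target EL.
 */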
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

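/* Dual 16x16->32 signed multiply: return the product of the low halfwords
 * in a and the product of the high halfwords in b.
 */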
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

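/* Variable-count shifts: the count is taken from the bottom byte of t1,
 * and counts of 32 or more yield zero, matching the ARM semantics for
 * LSL/LSR by register.
 */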
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

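/* Variable ASR: counts of 32 or more are clamped to 31, which produces
 * the same all-sign-bits result for an arithmetic shift.
 */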
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

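/* Set CF to bit 'shift' of var, i.e. the last bit shifted out. */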
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}

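/* For each of the 16 ARM data processing ops: 1 if it is a logical op
 * (NZ flags come from the result), 0 if it is arithmetic.
 */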
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

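/* Sync the IT block state known at translation time back into the
 * condexec_bits CPU state field, if we are inside an IT block.
 */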
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}

static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,     \
                                     TCGv_i32 a32, int index)           \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);              \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,             \
                                           TCGv_i32 val,                \
                                           TCGv_i32 a32, int index,     \
                                           ISSInfo issinfo)             \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                              \
    disas_set_da_iss(s, OPC, issinfo);                                  \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,     \
                                     TCGv_i32 a32, int index)           \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);              \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,             \
                                           TCGv_i32 val,                \
                                           TCGv_i32 a32, int index,     \
                                           ISSInfo issinfo)             \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                              \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                     \
}

static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

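/* Add (or subtract) the immediate or shifted-register offset of a
 * load/store word/unsigned byte instruction to the address in var.
 */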
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

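/* Return a pointer to the float_status an op should use: Neon ops use the
 * "standard FP status", VFP ops the FPSCR-controlled fp_status.
 */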
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round)                                      \
static inline void gen_vfp_##name(int dp, int shift, int neon)        \
{                                                                     \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                        \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift,  \
                                        statusptr);                   \
    } else {                                                          \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift,  \
                                        statusptr);                   \
    }                                                                 \
    tcg_temp_free_i32(tmp_shift);                                     \
    tcg_temp_free_ptr(statusptr);                                     \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}

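/* Return the byte offset within CPUARMState of VFP register 'reg';
 * dp selects double- vs single-precision numbering. The VFP registers
 * are stored within the vfp.zregs array.
 */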
static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}

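/* Load one 32-bit pass of a Neon register into a new temporary. */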
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, cpu_env, offset);
        break;
    case MO_Q:
        tcg_gen_ld_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, cpu_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}

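/* Move values between the VFP scratch temporaries (cpu_F0*, cpu_F1*)
 * and the architectural VFP registers.
 */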
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

d00584b7 1728#define ARM_CP_RW_BIT (1 << 20)
18c9b560 1729
78e138bc
PM
1730/* Include the VFP decoder */
1731#include "translate-vfp.inc.c"
1732
a7812ae4 1733static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1734{
0ecb72a5 1735 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1736}
1737
a7812ae4 1738static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1739{
0ecb72a5 1740 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1741}
1742
39d5492a 1743static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1744{
39d5492a 1745 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1746 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1747 return var;
e677137d
PB
1748}
1749
39d5492a 1750static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1751{
0ecb72a5 1752 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1753 tcg_temp_free_i32(var);
e677137d
PB
1754}
1755
1756static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1757{
1758 iwmmxt_store_reg(cpu_M0, rn);
1759}
1760
1761static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1762{
1763 iwmmxt_load_reg(cpu_M0, rn);
1764}
1765
1766static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1767{
1768 iwmmxt_load_reg(cpu_V1, rn);
1769 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1770}
1771
1772static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1773{
1774 iwmmxt_load_reg(cpu_V1, rn);
1775 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1776}
1777
1778static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1779{
1780 iwmmxt_load_reg(cpu_V1, rn);
1781 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1782}
1783
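/*
 * Macro families for two-operand iwMMXt operations: each expands to a
 * gen function that loads wRn into cpu_V1 and combines it with cpu_M0
 * via the named helper. The _ENV variants also pass cpu_env so the
 * helper can update CPU state (e.g. saturation/flag bits).
 */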
1784#define IWMMXT_OP(name) \
1785static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1786{ \
1787 iwmmxt_load_reg(cpu_V1, rn); \
1788 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1789}
1790
477955bd
PM
1791#define IWMMXT_OP_ENV(name) \
1792static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1793{ \
1794 iwmmxt_load_reg(cpu_V1, rn); \
1795 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1796}
1797
1798#define IWMMXT_OP_ENV_SIZE(name) \
1799IWMMXT_OP_ENV(name##b) \
1800IWMMXT_OP_ENV(name##w) \
1801IWMMXT_OP_ENV(name##l)
e677137d 1802
477955bd 1803#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1804static inline void gen_op_iwmmxt_##name##_M0(void) \
1805{ \
477955bd 1806 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1807}
1808
1809IWMMXT_OP(maddsq)
1810IWMMXT_OP(madduq)
1811IWMMXT_OP(sadb)
1812IWMMXT_OP(sadw)
1813IWMMXT_OP(mulslw)
1814IWMMXT_OP(mulshw)
1815IWMMXT_OP(mululw)
1816IWMMXT_OP(muluhw)
1817IWMMXT_OP(macsw)
1818IWMMXT_OP(macuw)
1819
477955bd
PM
1820IWMMXT_OP_ENV_SIZE(unpackl)
1821IWMMXT_OP_ENV_SIZE(unpackh)
1822
1823IWMMXT_OP_ENV1(unpacklub)
1824IWMMXT_OP_ENV1(unpackluw)
1825IWMMXT_OP_ENV1(unpacklul)
1826IWMMXT_OP_ENV1(unpackhub)
1827IWMMXT_OP_ENV1(unpackhuw)
1828IWMMXT_OP_ENV1(unpackhul)
1829IWMMXT_OP_ENV1(unpacklsb)
1830IWMMXT_OP_ENV1(unpacklsw)
1831IWMMXT_OP_ENV1(unpacklsl)
1832IWMMXT_OP_ENV1(unpackhsb)
1833IWMMXT_OP_ENV1(unpackhsw)
1834IWMMXT_OP_ENV1(unpackhsl)
1835
1836IWMMXT_OP_ENV_SIZE(cmpeq)
1837IWMMXT_OP_ENV_SIZE(cmpgtu)
1838IWMMXT_OP_ENV_SIZE(cmpgts)
1839
1840IWMMXT_OP_ENV_SIZE(mins)
1841IWMMXT_OP_ENV_SIZE(minu)
1842IWMMXT_OP_ENV_SIZE(maxs)
1843IWMMXT_OP_ENV_SIZE(maxu)
1844
1845IWMMXT_OP_ENV_SIZE(subn)
1846IWMMXT_OP_ENV_SIZE(addn)
1847IWMMXT_OP_ENV_SIZE(subu)
1848IWMMXT_OP_ENV_SIZE(addu)
1849IWMMXT_OP_ENV_SIZE(subs)
1850IWMMXT_OP_ENV_SIZE(adds)
1851
1852IWMMXT_OP_ENV(avgb0)
1853IWMMXT_OP_ENV(avgb1)
1854IWMMXT_OP_ENV(avgw0)
1855IWMMXT_OP_ENV(avgw1)
e677137d 1856
477955bd
PM
1857IWMMXT_OP_ENV(packuw)
1858IWMMXT_OP_ENV(packul)
1859IWMMXT_OP_ENV(packuq)
1860IWMMXT_OP_ENV(packsw)
1861IWMMXT_OP_ENV(packsl)
1862IWMMXT_OP_ENV(packsq)
e677137d 1863
e677137d
PB
1864static void gen_op_iwmmxt_set_mup(void)
1865{
39d5492a 1866 TCGv_i32 tmp;
e677137d
PB
1867 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1868 tcg_gen_ori_i32(tmp, tmp, 2);
1869 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1870}
1871
1872static void gen_op_iwmmxt_set_cup(void)
1873{
39d5492a 1874 TCGv_i32 tmp;
e677137d
PB
1875 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1876 tcg_gen_ori_i32(tmp, tmp, 1);
1877 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1878}
1879
1880static void gen_op_iwmmxt_setpsr_nz(void)
1881{
39d5492a 1882 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1883 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1884 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1885}
1886
1887static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1888{
1889 iwmmxt_load_reg(cpu_V1, rn);
86831435 1890 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1891 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1892}
1893
39d5492a
PM
1894static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1895 TCGv_i32 dest)
18c9b560
AZ
1896{
1897 int rd;
1898 uint32_t offset;
39d5492a 1899 TCGv_i32 tmp;
18c9b560
AZ
1900
1901 rd = (insn >> 16) & 0xf;
da6b5335 1902 tmp = load_reg(s, rd);
18c9b560
AZ
1903
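    /* 8-bit immediate offset, scaled by 4 (shifted left 2) when bit 8 is set */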
1904 offset = (insn & 0xff) << ((insn >> 7) & 2);
1905 if (insn & (1 << 24)) {
 1906 /* Pre-indexed */
1907 if (insn & (1 << 23))
da6b5335 1908 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1909 else
da6b5335
FN
1910 tcg_gen_addi_i32(tmp, tmp, -offset);
1911 tcg_gen_mov_i32(dest, tmp);
18c9b560 1912 if (insn & (1 << 21))
da6b5335
FN
1913 store_reg(s, rd, tmp);
1914 else
7d1b0095 1915 tcg_temp_free_i32(tmp);
18c9b560
AZ
1916 } else if (insn & (1 << 21)) {
 1917 /* Post-indexed */
da6b5335 1918 tcg_gen_mov_i32(dest, tmp);
18c9b560 1919 if (insn & (1 << 23))
da6b5335 1920 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1921 else
da6b5335
FN
1922 tcg_gen_addi_i32(tmp, tmp, -offset);
1923 store_reg(s, rd, tmp);
18c9b560
AZ
1924 } else if (!(insn & (1 << 23)))
1925 return 1;
1926 return 0;
1927}
1928
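/*
 * Fetch the shift amount for an iwMMXt shift/rotate: from one of the
 * wCGR0-wCGR3 control registers when bit 8 is set, otherwise from the
 * low half of wRn; in both cases it is masked to the caller's 'mask'.
 */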
39d5492a 1929static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1930{
1931 int rd = (insn >> 0) & 0xf;
39d5492a 1932 TCGv_i32 tmp;
18c9b560 1933
da6b5335
FN
1934 if (insn & (1 << 8)) {
1935 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1936 return 1;
da6b5335
FN
1937 } else {
1938 tmp = iwmmxt_load_creg(rd);
1939 }
1940 } else {
7d1b0095 1941 tmp = tcg_temp_new_i32();
da6b5335 1942 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1943 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1944 }
1945 tcg_gen_andi_i32(tmp, tmp, mask);
1946 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1947 tcg_temp_free_i32(tmp);
18c9b560
AZ
1948 return 0;
1949}
1950
a1c7273b 1951/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1952 (i.e. an undefined instruction). */
7dcc1f89 1953static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1954{
1955 int rd, wrd;
1956 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1957 TCGv_i32 addr;
1958 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1959
1960 if ((insn & 0x0e000e00) == 0x0c000000) {
1961 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1962 wrd = insn & 0xf;
1963 rdlo = (insn >> 12) & 0xf;
1964 rdhi = (insn >> 16) & 0xf;
d00584b7 1965 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1966 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1967 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1968 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1969 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
d00584b7 1970 } else { /* TMCRR */
da6b5335
FN
1971 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1972 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1973 gen_op_iwmmxt_set_mup();
1974 }
1975 return 0;
1976 }
1977
1978 wrd = (insn >> 12) & 0xf;
7d1b0095 1979 addr = tcg_temp_new_i32();
da6b5335 1980 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1981 tcg_temp_free_i32(addr);
18c9b560 1982 return 1;
da6b5335 1983 }
18c9b560 1984 if (insn & ARM_CP_RW_BIT) {
d00584b7 1985 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1986 tmp = tcg_temp_new_i32();
12dcc321 1987 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1988 iwmmxt_store_creg(wrd, tmp);
18c9b560 1989 } else {
e677137d
PB
1990 i = 1;
1991 if (insn & (1 << 8)) {
d00584b7 1992 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1993 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1994 i = 0;
d00584b7 1995 } else { /* WLDRW wRd */
29531141 1996 tmp = tcg_temp_new_i32();
12dcc321 1997 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1998 }
1999 } else {
29531141 2000 tmp = tcg_temp_new_i32();
d00584b7 2001 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 2002 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
d00584b7 2003 } else { /* WLDRB */
12dcc321 2004 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
2005 }
2006 }
2007 if (i) {
2008 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 2009 tcg_temp_free_i32(tmp);
e677137d 2010 }
18c9b560
AZ
2011 gen_op_iwmmxt_movq_wRn_M0(wrd);
2012 }
2013 } else {
d00584b7 2014 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 2015 tmp = iwmmxt_load_creg(wrd);
12dcc321 2016 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
2017 } else {
2018 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2019 tmp = tcg_temp_new_i32();
e677137d 2020 if (insn & (1 << 8)) {
d00584b7 2021 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 2022 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
d00584b7 2023 } else { /* WSTRW wRd */
ecc7b3aa 2024 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 2025 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
2026 }
2027 } else {
d00584b7 2028 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 2029 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 2030 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
d00584b7 2031 } else { /* WSTRB */
ecc7b3aa 2032 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 2033 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
2034 }
2035 }
18c9b560 2036 }
29531141 2037 tcg_temp_free_i32(tmp);
18c9b560 2038 }
7d1b0095 2039 tcg_temp_free_i32(addr);
18c9b560
AZ
2040 return 0;
2041 }
2042
2043 if ((insn & 0x0f000000) != 0x0e000000)
2044 return 1;
2045
2046 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
d00584b7 2047 case 0x000: /* WOR */
18c9b560
AZ
2048 wrd = (insn >> 12) & 0xf;
2049 rd0 = (insn >> 0) & 0xf;
2050 rd1 = (insn >> 16) & 0xf;
2051 gen_op_iwmmxt_movq_M0_wRn(rd0);
2052 gen_op_iwmmxt_orq_M0_wRn(rd1);
2053 gen_op_iwmmxt_setpsr_nz();
2054 gen_op_iwmmxt_movq_wRn_M0(wrd);
2055 gen_op_iwmmxt_set_mup();
2056 gen_op_iwmmxt_set_cup();
2057 break;
d00584b7 2058 case 0x011: /* TMCR */
18c9b560
AZ
2059 if (insn & 0xf)
2060 return 1;
2061 rd = (insn >> 12) & 0xf;
2062 wrd = (insn >> 16) & 0xf;
2063 switch (wrd) {
2064 case ARM_IWMMXT_wCID:
2065 case ARM_IWMMXT_wCASF:
2066 break;
2067 case ARM_IWMMXT_wCon:
2068 gen_op_iwmmxt_set_cup();
2069 /* Fall through. */
2070 case ARM_IWMMXT_wCSSF:
da6b5335
FN
2071 tmp = iwmmxt_load_creg(wrd);
2072 tmp2 = load_reg(s, rd);
f669df27 2073 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 2074 tcg_temp_free_i32(tmp2);
da6b5335 2075 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
2076 break;
2077 case ARM_IWMMXT_wCGR0:
2078 case ARM_IWMMXT_wCGR1:
2079 case ARM_IWMMXT_wCGR2:
2080 case ARM_IWMMXT_wCGR3:
2081 gen_op_iwmmxt_set_cup();
da6b5335
FN
2082 tmp = load_reg(s, rd);
2083 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
2084 break;
2085 default:
2086 return 1;
2087 }
2088 break;
d00584b7 2089 case 0x100: /* WXOR */
18c9b560
AZ
2090 wrd = (insn >> 12) & 0xf;
2091 rd0 = (insn >> 0) & 0xf;
2092 rd1 = (insn >> 16) & 0xf;
2093 gen_op_iwmmxt_movq_M0_wRn(rd0);
2094 gen_op_iwmmxt_xorq_M0_wRn(rd1);
2095 gen_op_iwmmxt_setpsr_nz();
2096 gen_op_iwmmxt_movq_wRn_M0(wrd);
2097 gen_op_iwmmxt_set_mup();
2098 gen_op_iwmmxt_set_cup();
2099 break;
d00584b7 2100 case 0x111: /* TMRC */
18c9b560
AZ
2101 if (insn & 0xf)
2102 return 1;
2103 rd = (insn >> 12) & 0xf;
2104 wrd = (insn >> 16) & 0xf;
da6b5335
FN
2105 tmp = iwmmxt_load_creg(wrd);
2106 store_reg(s, rd, tmp);
18c9b560 2107 break;
d00584b7 2108 case 0x300: /* WANDN */
18c9b560
AZ
2109 wrd = (insn >> 12) & 0xf;
2110 rd0 = (insn >> 0) & 0xf;
2111 rd1 = (insn >> 16) & 0xf;
2112 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 2113 tcg_gen_not_i64(cpu_M0, cpu_M0); /* WANDN: bitwise complement, not arithmetic negation */
18c9b560
AZ
2114 gen_op_iwmmxt_andq_M0_wRn(rd1);
2115 gen_op_iwmmxt_setpsr_nz();
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2118 gen_op_iwmmxt_set_cup();
2119 break;
d00584b7 2120 case 0x200: /* WAND */
18c9b560
AZ
2121 wrd = (insn >> 12) & 0xf;
2122 rd0 = (insn >> 0) & 0xf;
2123 rd1 = (insn >> 16) & 0xf;
2124 gen_op_iwmmxt_movq_M0_wRn(rd0);
2125 gen_op_iwmmxt_andq_M0_wRn(rd1);
2126 gen_op_iwmmxt_setpsr_nz();
2127 gen_op_iwmmxt_movq_wRn_M0(wrd);
2128 gen_op_iwmmxt_set_mup();
2129 gen_op_iwmmxt_set_cup();
2130 break;
d00584b7 2131 case 0x810: case 0xa10: /* WMADD */
18c9b560
AZ
2132 wrd = (insn >> 12) & 0xf;
2133 rd0 = (insn >> 0) & 0xf;
2134 rd1 = (insn >> 16) & 0xf;
2135 gen_op_iwmmxt_movq_M0_wRn(rd0);
2136 if (insn & (1 << 21))
2137 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2138 else
2139 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2140 gen_op_iwmmxt_movq_wRn_M0(wrd);
2141 gen_op_iwmmxt_set_mup();
2142 break;
d00584b7 2143 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
18c9b560
AZ
2144 wrd = (insn >> 12) & 0xf;
2145 rd0 = (insn >> 16) & 0xf;
2146 rd1 = (insn >> 0) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
2148 switch ((insn >> 22) & 3) {
2149 case 0:
2150 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2151 break;
2152 case 1:
2153 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2154 break;
2155 case 2:
2156 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2157 break;
2158 case 3:
2159 return 1;
2160 }
2161 gen_op_iwmmxt_movq_wRn_M0(wrd);
2162 gen_op_iwmmxt_set_mup();
2163 gen_op_iwmmxt_set_cup();
2164 break;
d00584b7 2165 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
18c9b560
AZ
2166 wrd = (insn >> 12) & 0xf;
2167 rd0 = (insn >> 16) & 0xf;
2168 rd1 = (insn >> 0) & 0xf;
2169 gen_op_iwmmxt_movq_M0_wRn(rd0);
2170 switch ((insn >> 22) & 3) {
2171 case 0:
2172 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2173 break;
2174 case 1:
2175 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2176 break;
2177 case 2:
2178 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2179 break;
2180 case 3:
2181 return 1;
2182 }
2183 gen_op_iwmmxt_movq_wRn_M0(wrd);
2184 gen_op_iwmmxt_set_mup();
2185 gen_op_iwmmxt_set_cup();
2186 break;
d00584b7 2187 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
18c9b560
AZ
2188 wrd = (insn >> 12) & 0xf;
2189 rd0 = (insn >> 16) & 0xf;
2190 rd1 = (insn >> 0) & 0xf;
2191 gen_op_iwmmxt_movq_M0_wRn(rd0);
2192 if (insn & (1 << 22))
2193 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2194 else
2195 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2196 if (!(insn & (1 << 20)))
2197 gen_op_iwmmxt_addl_M0_wRn(wrd);
2198 gen_op_iwmmxt_movq_wRn_M0(wrd);
2199 gen_op_iwmmxt_set_mup();
2200 break;
d00584b7 2201 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
18c9b560
AZ
2202 wrd = (insn >> 12) & 0xf;
2203 rd0 = (insn >> 16) & 0xf;
2204 rd1 = (insn >> 0) & 0xf;
2205 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2206 if (insn & (1 << 21)) {
2207 if (insn & (1 << 20))
2208 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2209 else
2210 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2211 } else {
2212 if (insn & (1 << 20))
2213 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2214 else
2215 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2216 }
18c9b560
AZ
2217 gen_op_iwmmxt_movq_wRn_M0(wrd);
2218 gen_op_iwmmxt_set_mup();
2219 break;
d00584b7 2220 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
18c9b560
AZ
2221 wrd = (insn >> 12) & 0xf;
2222 rd0 = (insn >> 16) & 0xf;
2223 rd1 = (insn >> 0) & 0xf;
2224 gen_op_iwmmxt_movq_M0_wRn(rd0);
2225 if (insn & (1 << 21))
2226 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2227 else
2228 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2229 if (!(insn & (1 << 20))) {
e677137d
PB
2230 iwmmxt_load_reg(cpu_V1, wrd);
2231 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2232 }
2233 gen_op_iwmmxt_movq_wRn_M0(wrd);
2234 gen_op_iwmmxt_set_mup();
2235 break;
d00584b7 2236 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
18c9b560
AZ
2237 wrd = (insn >> 12) & 0xf;
2238 rd0 = (insn >> 16) & 0xf;
2239 rd1 = (insn >> 0) & 0xf;
2240 gen_op_iwmmxt_movq_M0_wRn(rd0);
2241 switch ((insn >> 22) & 3) {
2242 case 0:
2243 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2244 break;
2245 case 1:
2246 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2247 break;
2248 case 2:
2249 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2250 break;
2251 case 3:
2252 return 1;
2253 }
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 gen_op_iwmmxt_set_cup();
2257 break;
d00584b7 2258 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
18c9b560
AZ
2259 wrd = (insn >> 12) & 0xf;
2260 rd0 = (insn >> 16) & 0xf;
2261 rd1 = (insn >> 0) & 0xf;
2262 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2263 if (insn & (1 << 22)) {
2264 if (insn & (1 << 20))
2265 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2266 else
2267 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2268 } else {
2269 if (insn & (1 << 20))
2270 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2271 else
2272 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2273 }
18c9b560
AZ
2274 gen_op_iwmmxt_movq_wRn_M0(wrd);
2275 gen_op_iwmmxt_set_mup();
2276 gen_op_iwmmxt_set_cup();
2277 break;
d00584b7 2278 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
18c9b560
AZ
2279 wrd = (insn >> 12) & 0xf;
2280 rd0 = (insn >> 16) & 0xf;
2281 rd1 = (insn >> 0) & 0xf;
2282 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2283 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2284 tcg_gen_andi_i32(tmp, tmp, 7);
2285 iwmmxt_load_reg(cpu_V1, rd1);
2286 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2287 tcg_temp_free_i32(tmp);
18c9b560
AZ
2288 gen_op_iwmmxt_movq_wRn_M0(wrd);
2289 gen_op_iwmmxt_set_mup();
2290 break;
d00584b7 2291 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2292 if (((insn >> 6) & 3) == 3)
2293 return 1;
18c9b560
AZ
2294 rd = (insn >> 12) & 0xf;
2295 wrd = (insn >> 16) & 0xf;
da6b5335 2296 tmp = load_reg(s, rd);
18c9b560
AZ
2297 gen_op_iwmmxt_movq_M0_wRn(wrd);
2298 switch ((insn >> 6) & 3) {
2299 case 0:
da6b5335
FN
2300 tmp2 = tcg_const_i32(0xff);
2301 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2302 break;
2303 case 1:
da6b5335
FN
2304 tmp2 = tcg_const_i32(0xffff);
2305 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2306 break;
2307 case 2:
da6b5335
FN
2308 tmp2 = tcg_const_i32(0xffffffff);
2309 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2310 break;
da6b5335 2311 default:
f764718d
RH
2312 tmp2 = NULL;
2313 tmp3 = NULL;
18c9b560 2314 }
da6b5335 2315 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2316 tcg_temp_free_i32(tmp3);
2317 tcg_temp_free_i32(tmp2);
7d1b0095 2318 tcg_temp_free_i32(tmp);
18c9b560
AZ
2319 gen_op_iwmmxt_movq_wRn_M0(wrd);
2320 gen_op_iwmmxt_set_mup();
2321 break;
d00584b7 2322 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
18c9b560
AZ
2323 rd = (insn >> 12) & 0xf;
2324 wrd = (insn >> 16) & 0xf;
da6b5335 2325 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2326 return 1;
2327 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2328 tmp = tcg_temp_new_i32();
18c9b560
AZ
2329 switch ((insn >> 22) & 3) {
2330 case 0:
da6b5335 2331 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2332 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2333 if (insn & 8) {
2334 tcg_gen_ext8s_i32(tmp, tmp);
2335 } else {
2336 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2337 }
2338 break;
2339 case 1:
da6b5335 2340 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2341 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2342 if (insn & 8) {
2343 tcg_gen_ext16s_i32(tmp, tmp);
2344 } else {
2345 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2346 }
2347 break;
2348 case 2:
da6b5335 2349 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2350 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2351 break;
18c9b560 2352 }
da6b5335 2353 store_reg(s, rd, tmp);
18c9b560 2354 break;
d00584b7 2355 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2356 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2357 return 1;
da6b5335 2358 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2359 switch ((insn >> 22) & 3) {
2360 case 0:
da6b5335 2361 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2362 break;
2363 case 1:
da6b5335 2364 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2365 break;
2366 case 2:
da6b5335 2367 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2368 break;
18c9b560 2369 }
da6b5335
FN
2370 tcg_gen_shli_i32(tmp, tmp, 28);
2371 gen_set_nzcv(tmp);
7d1b0095 2372 tcg_temp_free_i32(tmp);
18c9b560 2373 break;
d00584b7 2374 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2375 if (((insn >> 6) & 3) == 3)
2376 return 1;
18c9b560
AZ
2377 rd = (insn >> 12) & 0xf;
2378 wrd = (insn >> 16) & 0xf;
da6b5335 2379 tmp = load_reg(s, rd);
18c9b560
AZ
2380 switch ((insn >> 6) & 3) {
2381 case 0:
da6b5335 2382 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2383 break;
2384 case 1:
da6b5335 2385 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2386 break;
2387 case 2:
da6b5335 2388 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2389 break;
18c9b560 2390 }
7d1b0095 2391 tcg_temp_free_i32(tmp);
18c9b560
AZ
2392 gen_op_iwmmxt_movq_wRn_M0(wrd);
2393 gen_op_iwmmxt_set_mup();
2394 break;
d00584b7 2395 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2396 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2397 return 1;
da6b5335 2398 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2399 tmp2 = tcg_temp_new_i32();
da6b5335 2400 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2401 switch ((insn >> 22) & 3) {
2402 case 0:
2403 for (i = 0; i < 7; i ++) {
da6b5335
FN
2404 tcg_gen_shli_i32(tmp2, tmp2, 4);
2405 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2406 }
2407 break;
2408 case 1:
2409 for (i = 0; i < 3; i ++) {
da6b5335
FN
2410 tcg_gen_shli_i32(tmp2, tmp2, 8);
2411 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2412 }
2413 break;
2414 case 2:
da6b5335
FN
2415 tcg_gen_shli_i32(tmp2, tmp2, 16);
2416 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2417 break;
18c9b560 2418 }
da6b5335 2419 gen_set_nzcv(tmp);
7d1b0095
PM
2420 tcg_temp_free_i32(tmp2);
2421 tcg_temp_free_i32(tmp);
18c9b560 2422 break;
d00584b7 2423 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
18c9b560
AZ
2424 wrd = (insn >> 12) & 0xf;
2425 rd0 = (insn >> 16) & 0xf;
2426 gen_op_iwmmxt_movq_M0_wRn(rd0);
2427 switch ((insn >> 22) & 3) {
2428 case 0:
e677137d 2429 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2430 break;
2431 case 1:
e677137d 2432 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2433 break;
2434 case 2:
e677137d 2435 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2436 break;
2437 case 3:
2438 return 1;
2439 }
2440 gen_op_iwmmxt_movq_wRn_M0(wrd);
2441 gen_op_iwmmxt_set_mup();
2442 break;
d00584b7 2443 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2444 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2445 return 1;
da6b5335 2446 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2447 tmp2 = tcg_temp_new_i32();
da6b5335 2448 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2449 switch ((insn >> 22) & 3) {
2450 case 0:
2451 for (i = 0; i < 7; i ++) {
da6b5335
FN
2452 tcg_gen_shli_i32(tmp2, tmp2, 4);
2453 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2454 }
2455 break;
2456 case 1:
2457 for (i = 0; i < 3; i ++) {
da6b5335
FN
2458 tcg_gen_shli_i32(tmp2, tmp2, 8);
2459 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2460 }
2461 break;
2462 case 2:
da6b5335
FN
2463 tcg_gen_shli_i32(tmp2, tmp2, 16);
2464 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2465 break;
18c9b560 2466 }
da6b5335 2467 gen_set_nzcv(tmp);
7d1b0095
PM
2468 tcg_temp_free_i32(tmp2);
2469 tcg_temp_free_i32(tmp);
18c9b560 2470 break;
d00584b7 2471 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
18c9b560
AZ
2472 rd = (insn >> 12) & 0xf;
2473 rd0 = (insn >> 16) & 0xf;
da6b5335 2474 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2475 return 1;
2476 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2477 tmp = tcg_temp_new_i32();
18c9b560
AZ
2478 switch ((insn >> 22) & 3) {
2479 case 0:
da6b5335 2480 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2481 break;
2482 case 1:
da6b5335 2483 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2484 break;
2485 case 2:
da6b5335 2486 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2487 break;
18c9b560 2488 }
da6b5335 2489 store_reg(s, rd, tmp);
18c9b560 2490 break;
d00584b7 2491 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
18c9b560
AZ
2492 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2493 wrd = (insn >> 12) & 0xf;
2494 rd0 = (insn >> 16) & 0xf;
2495 rd1 = (insn >> 0) & 0xf;
2496 gen_op_iwmmxt_movq_M0_wRn(rd0);
2497 switch ((insn >> 22) & 3) {
2498 case 0:
2499 if (insn & (1 << 21))
2500 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2501 else
2502 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2503 break;
2504 case 1:
2505 if (insn & (1 << 21))
2506 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2507 else
2508 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2509 break;
2510 case 2:
2511 if (insn & (1 << 21))
2512 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2513 else
2514 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2515 break;
2516 case 3:
2517 return 1;
2518 }
2519 gen_op_iwmmxt_movq_wRn_M0(wrd);
2520 gen_op_iwmmxt_set_mup();
2521 gen_op_iwmmxt_set_cup();
2522 break;
d00584b7 2523 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
18c9b560
AZ
2524 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2525 wrd = (insn >> 12) & 0xf;
2526 rd0 = (insn >> 16) & 0xf;
2527 gen_op_iwmmxt_movq_M0_wRn(rd0);
2528 switch ((insn >> 22) & 3) {
2529 case 0:
2530 if (insn & (1 << 21))
2531 gen_op_iwmmxt_unpacklsb_M0();
2532 else
2533 gen_op_iwmmxt_unpacklub_M0();
2534 break;
2535 case 1:
2536 if (insn & (1 << 21))
2537 gen_op_iwmmxt_unpacklsw_M0();
2538 else
2539 gen_op_iwmmxt_unpackluw_M0();
2540 break;
2541 case 2:
2542 if (insn & (1 << 21))
2543 gen_op_iwmmxt_unpacklsl_M0();
2544 else
2545 gen_op_iwmmxt_unpacklul_M0();
2546 break;
2547 case 3:
2548 return 1;
2549 }
2550 gen_op_iwmmxt_movq_wRn_M0(wrd);
2551 gen_op_iwmmxt_set_mup();
2552 gen_op_iwmmxt_set_cup();
2553 break;
d00584b7 2554 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
18c9b560
AZ
2555 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2556 wrd = (insn >> 12) & 0xf;
2557 rd0 = (insn >> 16) & 0xf;
2558 gen_op_iwmmxt_movq_M0_wRn(rd0);
2559 switch ((insn >> 22) & 3) {
2560 case 0:
2561 if (insn & (1 << 21))
2562 gen_op_iwmmxt_unpackhsb_M0();
2563 else
2564 gen_op_iwmmxt_unpackhub_M0();
2565 break;
2566 case 1:
2567 if (insn & (1 << 21))
2568 gen_op_iwmmxt_unpackhsw_M0();
2569 else
2570 gen_op_iwmmxt_unpackhuw_M0();
2571 break;
2572 case 2:
2573 if (insn & (1 << 21))
2574 gen_op_iwmmxt_unpackhsl_M0();
2575 else
2576 gen_op_iwmmxt_unpackhul_M0();
2577 break;
2578 case 3:
2579 return 1;
2580 }
2581 gen_op_iwmmxt_movq_wRn_M0(wrd);
2582 gen_op_iwmmxt_set_mup();
2583 gen_op_iwmmxt_set_cup();
2584 break;
d00584b7 2585 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
18c9b560 2586 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2587 if (((insn >> 22) & 3) == 0)
2588 return 1;
18c9b560
AZ
2589 wrd = (insn >> 12) & 0xf;
2590 rd0 = (insn >> 16) & 0xf;
2591 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2592 tmp = tcg_temp_new_i32();
da6b5335 2593 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2594 tcg_temp_free_i32(tmp);
18c9b560 2595 return 1;
da6b5335 2596 }
18c9b560 2597 switch ((insn >> 22) & 3) {
18c9b560 2598 case 1:
477955bd 2599 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2600 break;
2601 case 2:
477955bd 2602 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2603 break;
2604 case 3:
477955bd 2605 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2606 break;
2607 }
7d1b0095 2608 tcg_temp_free_i32(tmp);
18c9b560
AZ
2609 gen_op_iwmmxt_movq_wRn_M0(wrd);
2610 gen_op_iwmmxt_set_mup();
2611 gen_op_iwmmxt_set_cup();
2612 break;
d00584b7 2613 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
18c9b560 2614 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2615 if (((insn >> 22) & 3) == 0)
2616 return 1;
18c9b560
AZ
2617 wrd = (insn >> 12) & 0xf;
2618 rd0 = (insn >> 16) & 0xf;
2619 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2620 tmp = tcg_temp_new_i32();
da6b5335 2621 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2622 tcg_temp_free_i32(tmp);
18c9b560 2623 return 1;
da6b5335 2624 }
18c9b560 2625 switch ((insn >> 22) & 3) {
18c9b560 2626 case 1:
477955bd 2627 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2628 break;
2629 case 2:
477955bd 2630 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2631 break;
2632 case 3:
477955bd 2633 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2634 break;
2635 }
7d1b0095 2636 tcg_temp_free_i32(tmp);
18c9b560
AZ
2637 gen_op_iwmmxt_movq_wRn_M0(wrd);
2638 gen_op_iwmmxt_set_mup();
2639 gen_op_iwmmxt_set_cup();
2640 break;
d00584b7 2641 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
18c9b560 2642 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2643 if (((insn >> 22) & 3) == 0)
2644 return 1;
18c9b560
AZ
2645 wrd = (insn >> 12) & 0xf;
2646 rd0 = (insn >> 16) & 0xf;
2647 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2648 tmp = tcg_temp_new_i32();
da6b5335 2649 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2650 tcg_temp_free_i32(tmp);
18c9b560 2651 return 1;
da6b5335 2652 }
18c9b560 2653 switch ((insn >> 22) & 3) {
18c9b560 2654 case 1:
477955bd 2655 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2656 break;
2657 case 2:
477955bd 2658 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2659 break;
2660 case 3:
477955bd 2661 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2662 break;
2663 }
7d1b0095 2664 tcg_temp_free_i32(tmp);
18c9b560
AZ
2665 gen_op_iwmmxt_movq_wRn_M0(wrd);
2666 gen_op_iwmmxt_set_mup();
2667 gen_op_iwmmxt_set_cup();
2668 break;
d00584b7 2669 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
18c9b560 2670 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2671 if (((insn >> 22) & 3) == 0)
2672 return 1;
18c9b560
AZ
2673 wrd = (insn >> 12) & 0xf;
2674 rd0 = (insn >> 16) & 0xf;
2675 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2676 tmp = tcg_temp_new_i32();
18c9b560 2677 switch ((insn >> 22) & 3) {
18c9b560 2678 case 1:
da6b5335 2679 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2680 tcg_temp_free_i32(tmp);
18c9b560 2681 return 1;
da6b5335 2682 }
477955bd 2683 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2684 break;
2685 case 2:
da6b5335 2686 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2687 tcg_temp_free_i32(tmp);
18c9b560 2688 return 1;
da6b5335 2689 }
477955bd 2690 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2691 break;
2692 case 3:
da6b5335 2693 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2694 tcg_temp_free_i32(tmp);
18c9b560 2695 return 1;
da6b5335 2696 }
477955bd 2697 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2698 break;
2699 }
7d1b0095 2700 tcg_temp_free_i32(tmp);
18c9b560
AZ
2701 gen_op_iwmmxt_movq_wRn_M0(wrd);
2702 gen_op_iwmmxt_set_mup();
2703 gen_op_iwmmxt_set_cup();
2704 break;
d00584b7 2705 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
18c9b560
AZ
2706 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2707 wrd = (insn >> 12) & 0xf;
2708 rd0 = (insn >> 16) & 0xf;
2709 rd1 = (insn >> 0) & 0xf;
2710 gen_op_iwmmxt_movq_M0_wRn(rd0);
2711 switch ((insn >> 22) & 3) {
2712 case 0:
2713 if (insn & (1 << 21))
2714 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2715 else
2716 gen_op_iwmmxt_minub_M0_wRn(rd1);
2717 break;
2718 case 1:
2719 if (insn & (1 << 21))
2720 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2721 else
2722 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2723 break;
2724 case 2:
2725 if (insn & (1 << 21))
2726 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2727 else
2728 gen_op_iwmmxt_minul_M0_wRn(rd1);
2729 break;
2730 case 3:
2731 return 1;
2732 }
2733 gen_op_iwmmxt_movq_wRn_M0(wrd);
2734 gen_op_iwmmxt_set_mup();
2735 break;
d00584b7 2736 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
18c9b560
AZ
2737 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2738 wrd = (insn >> 12) & 0xf;
2739 rd0 = (insn >> 16) & 0xf;
2740 rd1 = (insn >> 0) & 0xf;
2741 gen_op_iwmmxt_movq_M0_wRn(rd0);
2742 switch ((insn >> 22) & 3) {
2743 case 0:
2744 if (insn & (1 << 21))
2745 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2746 else
2747 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2748 break;
2749 case 1:
2750 if (insn & (1 << 21))
2751 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2752 else
2753 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2754 break;
2755 case 2:
2756 if (insn & (1 << 21))
2757 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2758 else
2759 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2760 break;
2761 case 3:
2762 return 1;
2763 }
2764 gen_op_iwmmxt_movq_wRn_M0(wrd);
2765 gen_op_iwmmxt_set_mup();
2766 break;
d00584b7 2767 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
18c9b560
AZ
2768 case 0x402: case 0x502: case 0x602: case 0x702:
2769 wrd = (insn >> 12) & 0xf;
2770 rd0 = (insn >> 16) & 0xf;
2771 rd1 = (insn >> 0) & 0xf;
2772 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2773 tmp = tcg_const_i32((insn >> 20) & 3);
2774 iwmmxt_load_reg(cpu_V1, rd1);
2775 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2776 tcg_temp_free_i32(tmp);
18c9b560
AZ
2777 gen_op_iwmmxt_movq_wRn_M0(wrd);
2778 gen_op_iwmmxt_set_mup();
2779 break;
d00584b7 2780 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
18c9b560
AZ
2781 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2782 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2783 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2784 wrd = (insn >> 12) & 0xf;
2785 rd0 = (insn >> 16) & 0xf;
2786 rd1 = (insn >> 0) & 0xf;
2787 gen_op_iwmmxt_movq_M0_wRn(rd0);
2788 switch ((insn >> 20) & 0xf) {
2789 case 0x0:
2790 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2791 break;
2792 case 0x1:
2793 gen_op_iwmmxt_subub_M0_wRn(rd1);
2794 break;
2795 case 0x3:
2796 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2797 break;
2798 case 0x4:
2799 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2800 break;
2801 case 0x5:
2802 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2803 break;
2804 case 0x7:
2805 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2806 break;
2807 case 0x8:
2808 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2809 break;
2810 case 0x9:
2811 gen_op_iwmmxt_subul_M0_wRn(rd1);
2812 break;
2813 case 0xb:
2814 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2815 break;
2816 default:
2817 return 1;
2818 }
2819 gen_op_iwmmxt_movq_wRn_M0(wrd);
2820 gen_op_iwmmxt_set_mup();
2821 gen_op_iwmmxt_set_cup();
2822 break;
d00584b7 2823 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
18c9b560
AZ
2824 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2825 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2826 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2827 wrd = (insn >> 12) & 0xf;
2828 rd0 = (insn >> 16) & 0xf;
2829 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2830 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2831 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2832 tcg_temp_free_i32(tmp);
18c9b560
AZ
2833 gen_op_iwmmxt_movq_wRn_M0(wrd);
2834 gen_op_iwmmxt_set_mup();
2835 gen_op_iwmmxt_set_cup();
2836 break;
d00584b7 2837 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
18c9b560
AZ
2838 case 0x418: case 0x518: case 0x618: case 0x718:
2839 case 0x818: case 0x918: case 0xa18: case 0xb18:
2840 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2841 wrd = (insn >> 12) & 0xf;
2842 rd0 = (insn >> 16) & 0xf;
2843 rd1 = (insn >> 0) & 0xf;
2844 gen_op_iwmmxt_movq_M0_wRn(rd0);
2845 switch ((insn >> 20) & 0xf) {
2846 case 0x0:
2847 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2848 break;
2849 case 0x1:
2850 gen_op_iwmmxt_addub_M0_wRn(rd1);
2851 break;
2852 case 0x3:
2853 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2854 break;
2855 case 0x4:
2856 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2857 break;
2858 case 0x5:
2859 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2860 break;
2861 case 0x7:
2862 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2863 break;
2864 case 0x8:
2865 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2866 break;
2867 case 0x9:
2868 gen_op_iwmmxt_addul_M0_wRn(rd1);
2869 break;
2870 case 0xb:
2871 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2872 break;
2873 default:
2874 return 1;
2875 }
2876 gen_op_iwmmxt_movq_wRn_M0(wrd);
2877 gen_op_iwmmxt_set_mup();
2878 gen_op_iwmmxt_set_cup();
2879 break;
d00584b7 2880 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
18c9b560
AZ
2881 case 0x408: case 0x508: case 0x608: case 0x708:
2882 case 0x808: case 0x908: case 0xa08: case 0xb08:
2883 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2884 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2885 return 1;
18c9b560
AZ
2886 wrd = (insn >> 12) & 0xf;
2887 rd0 = (insn >> 16) & 0xf;
2888 rd1 = (insn >> 0) & 0xf;
2889 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2890 switch ((insn >> 22) & 3) {
18c9b560
AZ
2891 case 1:
2892 if (insn & (1 << 21))
2893 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2894 else
2895 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2896 break;
2897 case 2:
2898 if (insn & (1 << 21))
2899 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2900 else
2901 gen_op_iwmmxt_packul_M0_wRn(rd1);
2902 break;
2903 case 3:
2904 if (insn & (1 << 21))
2905 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2906 else
2907 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2908 break;
2909 }
2910 gen_op_iwmmxt_movq_wRn_M0(wrd);
2911 gen_op_iwmmxt_set_mup();
2912 gen_op_iwmmxt_set_cup();
2913 break;
2914 case 0x201: case 0x203: case 0x205: case 0x207:
2915 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2916 case 0x211: case 0x213: case 0x215: case 0x217:
2917 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2918 wrd = (insn >> 5) & 0xf;
2919 rd0 = (insn >> 12) & 0xf;
2920 rd1 = (insn >> 0) & 0xf;
2921 if (rd0 == 0xf || rd1 == 0xf)
2922 return 1;
2923 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2924 tmp = load_reg(s, rd0);
2925 tmp2 = load_reg(s, rd1);
18c9b560 2926 switch ((insn >> 16) & 0xf) {
d00584b7 2927 case 0x0: /* TMIA */
da6b5335 2928 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2929 break;
d00584b7 2930 case 0x8: /* TMIAPH */
da6b5335 2931 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2932 break;
d00584b7 2933 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2934 if (insn & (1 << 16))
da6b5335 2935 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2936 if (insn & (1 << 17))
da6b5335
FN
2937 tcg_gen_shri_i32(tmp2, tmp2, 16);
2938 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2939 break;
2940 default:
7d1b0095
PM
2941 tcg_temp_free_i32(tmp2);
2942 tcg_temp_free_i32(tmp);
18c9b560
AZ
2943 return 1;
2944 }
7d1b0095
PM
2945 tcg_temp_free_i32(tmp2);
2946 tcg_temp_free_i32(tmp);
18c9b560
AZ
2947 gen_op_iwmmxt_movq_wRn_M0(wrd);
2948 gen_op_iwmmxt_set_mup();
2949 break;
2950 default:
2951 return 1;
2952 }
2953
2954 return 0;
2955}
2956
a1c7273b 2957/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2958 (i.e. an undefined instruction). */
7dcc1f89 2959static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2960{
2961 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2962 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2963
2964 if ((insn & 0x0ff00f10) == 0x0e200010) {
2965 /* Multiply with Internal Accumulate Format */
2966 rd0 = (insn >> 12) & 0xf;
2967 rd1 = insn & 0xf;
2968 acc = (insn >> 5) & 7;
2969
2970 if (acc != 0)
2971 return 1;
2972
3a554c0f
FN
2973 tmp = load_reg(s, rd0);
2974 tmp2 = load_reg(s, rd1);
18c9b560 2975 switch ((insn >> 16) & 0xf) {
d00584b7 2976 case 0x0: /* MIA */
3a554c0f 2977 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2978 break;
d00584b7 2979 case 0x8: /* MIAPH */
3a554c0f 2980 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2981 break;
d00584b7
PM
2982 case 0xc: /* MIABB */
2983 case 0xd: /* MIABT */
2984 case 0xe: /* MIATB */
2985 case 0xf: /* MIATT */
18c9b560 2986 if (insn & (1 << 16))
3a554c0f 2987 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2988 if (insn & (1 << 17))
3a554c0f
FN
2989 tcg_gen_shri_i32(tmp2, tmp2, 16);
2990 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2991 break;
2992 default:
2993 return 1;
2994 }
7d1b0095
PM
2995 tcg_temp_free_i32(tmp2);
2996 tcg_temp_free_i32(tmp);
18c9b560
AZ
2997
2998 gen_op_iwmmxt_movq_wRn_M0(acc);
2999 return 0;
3000 }
3001
3002 if ((insn & 0x0fe00ff8) == 0x0c400000) {
3003 /* Internal Accumulator Access Format */
3004 rdhi = (insn >> 16) & 0xf;
3005 rdlo = (insn >> 12) & 0xf;
3006 acc = insn & 7;
3007
3008 if (acc != 0)
3009 return 1;
3010
d00584b7 3011 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 3012 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 3013 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 3014 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 3015 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 3016 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
d00584b7 3017 } else { /* MAR */
3a554c0f
FN
3018 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
3019 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
3020 }
3021 return 0;
3022 }
3023
3024 return 1;
3025}
3026
9ee6e8bb
PB
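/*
 * Decode VFP register numbers: each register is a 4-bit field plus one
 * extra bit held elsewhere in the encoding; for double-precision
 * registers the extra bit is only accepted from VFP3 on (it UNDEFs
 * otherwise).
 */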
3027#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
3028#define VFP_SREG(insn, bigbit, smallbit) \
3029 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
3030#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
d614a513 3031 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
9ee6e8bb
PB
3032 reg = (((insn) >> (bigbit)) & 0x0f) \
3033 | (((insn) >> ((smallbit) - 4)) & 0x10); \
3034 } else { \
3035 if (insn & (1 << (smallbit))) \
3036 return 1; \
3037 reg = ((insn) >> (bigbit)) & 0x0f; \
3038 }} while (0)
3039
3040#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
3041#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
3042#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
3043#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
3044#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
3045#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
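/*
 * For example, VFP_SREG_D(insn) expands to
 * ((insn >> 11) & 0x1e) | ((insn >> 22) & 1),
 * i.e. the single-precision register number Vd:D built from
 * insn[15:12] and insn[22].
 */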
3046
4373f3ce 3047/* Move between integer and VFP cores. */
39d5492a 3048static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 3049{
39d5492a 3050 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
3051 tcg_gen_mov_i32(tmp, cpu_F0s);
3052 return tmp;
3053}
3054
39d5492a 3055static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
3056{
3057 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 3058 tcg_temp_free_i32(tmp);
4373f3ce
PB
3059}
3060
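/* Replicate one 16-bit half of 'var' across both halves of the word. */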
39d5492a 3061static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 3062{
39d5492a 3063 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 3064 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
3065 tcg_gen_shli_i32(tmp, var, 16);
3066 tcg_gen_or_i32(var, var, tmp);
7d1b0095 3067 tcg_temp_free_i32(tmp);
ad69471c
PB
3068}
3069
39d5492a 3070static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 3071{
39d5492a 3072 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
3073 tcg_gen_andi_i32(var, var, 0xffff0000);
3074 tcg_gen_shri_i32(tmp, var, 16);
3075 tcg_gen_or_i32(var, var, tmp);
7d1b0095 3076 tcg_temp_free_i32(tmp);
ad69471c
PB
3077}
3078
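/*
 * VSEL: select between Vn and Vm according to one of four conditions
 * (eq/vs/ge/gt in the 'cc' field), evaluated from the cached NZCV flags.
 */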
b3ff4b87 3079static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
04731fb5 3080{
b3ff4b87
PM
3081 uint32_t rd, rn, rm;
3082 bool dp = a->dp;
3083
3084 if (!dc_isar_feature(aa32_vsel, s)) {
3085 return false;
3086 }
3087
3088 /* UNDEF accesses to D16-D31 if they don't exist */
3089 if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
3090 ((a->vm | a->vn | a->vd) & 0x10)) {
3091 return false;
3092 }
3093 rd = a->vd;
3094 rn = a->vn;
3095 rm = a->vm;
3096
3097 if (!vfp_access_check(s)) {
3098 return true;
3099 }
04731fb5
WN
3100
3101 if (dp) {
3102 TCGv_i64 frn, frm, dest;
3103 TCGv_i64 tmp, zero, zf, nf, vf;
3104
3105 zero = tcg_const_i64(0);
3106
3107 frn = tcg_temp_new_i64();
3108 frm = tcg_temp_new_i64();
3109 dest = tcg_temp_new_i64();
3110
3111 zf = tcg_temp_new_i64();
3112 nf = tcg_temp_new_i64();
3113 vf = tcg_temp_new_i64();
3114
3115 tcg_gen_extu_i32_i64(zf, cpu_ZF);
3116 tcg_gen_ext_i32_i64(nf, cpu_NF);
3117 tcg_gen_ext_i32_i64(vf, cpu_VF);
3118
3119 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3120 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
b3ff4b87 3121 switch (a->cc) {
04731fb5
WN
3122 case 0: /* eq: Z */
3123 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
3124 frn, frm);
3125 break;
3126 case 1: /* vs: V */
3127 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
3128 frn, frm);
3129 break;
3130 case 2: /* ge: N == V -> N ^ V == 0 */
3131 tmp = tcg_temp_new_i64();
3132 tcg_gen_xor_i64(tmp, vf, nf);
3133 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3134 frn, frm);
3135 tcg_temp_free_i64(tmp);
3136 break;
3137 case 3: /* gt: !Z && N == V */
3138 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
3139 frn, frm);
3140 tmp = tcg_temp_new_i64();
3141 tcg_gen_xor_i64(tmp, vf, nf);
3142 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3143 dest, frm);
3144 tcg_temp_free_i64(tmp);
3145 break;
3146 }
3147 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3148 tcg_temp_free_i64(frn);
3149 tcg_temp_free_i64(frm);
3150 tcg_temp_free_i64(dest);
3151
3152 tcg_temp_free_i64(zf);
3153 tcg_temp_free_i64(nf);
3154 tcg_temp_free_i64(vf);
3155
3156 tcg_temp_free_i64(zero);
3157 } else {
3158 TCGv_i32 frn, frm, dest;
3159 TCGv_i32 tmp, zero;
3160
3161 zero = tcg_const_i32(0);
3162
3163 frn = tcg_temp_new_i32();
3164 frm = tcg_temp_new_i32();
3165 dest = tcg_temp_new_i32();
3166 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3167 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
b3ff4b87 3168 switch (a->cc) {
04731fb5
WN
3169 case 0: /* eq: Z */
3170 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3171 frn, frm);
3172 break;
3173 case 1: /* vs: V */
3174 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3175 frn, frm);
3176 break;
3177 case 2: /* ge: N == V -> N ^ V == 0 */
3178 tmp = tcg_temp_new_i32();
3179 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3180 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3181 frn, frm);
3182 tcg_temp_free_i32(tmp);
3183 break;
3184 case 3: /* gt: !Z && N == V */
3185 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3186 frn, frm);
3187 tmp = tcg_temp_new_i32();
3188 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3189 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3190 dest, frm);
3191 tcg_temp_free_i32(tmp);
3192 break;
3193 }
3194 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3195 tcg_temp_free_i32(frn);
3196 tcg_temp_free_i32(frm);
3197 tcg_temp_free_i32(dest);
3198
3199 tcg_temp_free_i32(zero);
3200 }
3201
b3ff4b87 3202 return true;
04731fb5
WN
3203}
3204
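/*
 * VMINNM/VMAXNM: IEEE 754-2008 minNum/maxNum, which return the numeric
 * operand when exactly one input is a quiet NaN.
 */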
f65988a1 3205static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
40cfacdd 3206{
f65988a1
PM
3207 uint32_t rd, rn, rm;
3208 bool dp = a->dp;
3209 bool vmin = a->op;
3210 TCGv_ptr fpst;
3211
3212 if (!dc_isar_feature(aa32_vminmaxnm, s)) {
3213 return false;
3214 }
3215
3216 /* UNDEF accesses to D16-D31 if they don't exist */
3217 if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
3218 ((a->vm | a->vn | a->vd) & 0x10)) {
3219 return false;
3220 }
3221 rd = a->vd;
3222 rn = a->vn;
3223 rm = a->vm;
3224
3225 if (!vfp_access_check(s)) {
3226 return true;
3227 }
3228
3229 fpst = get_fpstatus_ptr(0);
40cfacdd
WN
3230
3231 if (dp) {
3232 TCGv_i64 frn, frm, dest;
3233
3234 frn = tcg_temp_new_i64();
3235 frm = tcg_temp_new_i64();
3236 dest = tcg_temp_new_i64();
3237
3238 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3239 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3240 if (vmin) {
f71a2ae5 3241 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3242 } else {
f71a2ae5 3243 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3244 }
3245 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3246 tcg_temp_free_i64(frn);
3247 tcg_temp_free_i64(frm);
3248 tcg_temp_free_i64(dest);
3249 } else {
3250 TCGv_i32 frn, frm, dest;
3251
3252 frn = tcg_temp_new_i32();
3253 frm = tcg_temp_new_i32();
3254 dest = tcg_temp_new_i32();
3255
3256 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3257 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3258 if (vmin) {
f71a2ae5 3259 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3260 } else {
f71a2ae5 3261 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3262 }
3263 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3264 tcg_temp_free_i32(frn);
3265 tcg_temp_free_i32(frm);
3266 tcg_temp_free_i32(dest);
3267 }
3268
3269 tcg_temp_free_ptr(fpst);
f65988a1 3270 return true;
40cfacdd
WN
3271}
3272
e3bb599d
PM
3273/*
3274 * Table for converting the most common AArch32 encoding of
3275 * rounding mode to arm_fprounding order (which matches the
3276 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3277 */
3278static const uint8_t fp_decode_rm[] = {
3279 FPROUNDING_TIEAWAY,
3280 FPROUNDING_TIEEVEN,
3281 FPROUNDING_POSINF,
3282 FPROUNDING_NEGINF,
3283};
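/* Indexed by the instruction's 2-bit rounding-mode field: 0 -> tie-away,
 * 1 -> tie-even, 2 -> +inf, 3 -> -inf (used by VRINT{A,N,P,M} and the
 * VCVT{A,N,P,M} handling below).
 */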
3284
3285static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
7655f39b 3286{
e3bb599d
PM
3287 uint32_t rd, rm;
3288 bool dp = a->dp;
3289 TCGv_ptr fpst;
7655f39b 3290 TCGv_i32 tcg_rmode;
e3bb599d
PM
3291 int rounding = fp_decode_rm[a->rm];
3292
3293 if (!dc_isar_feature(aa32_vrint, s)) {
3294 return false;
3295 }
3296
3297 /* UNDEF accesses to D16-D31 if they don't exist */
3298 if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
3299 ((a->vm | a->vd) & 0x10)) {
3300 return false;
3301 }
3302 rd = a->vd;
3303 rm = a->vm;
3304
3305 if (!vfp_access_check(s)) {
3306 return true;
3307 }
3308
3309 fpst = get_fpstatus_ptr(0);
7655f39b
WN
3310
3311 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
9b049916 3312 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
7655f39b
WN
3313
3314 if (dp) {
3315 TCGv_i64 tcg_op;
3316 TCGv_i64 tcg_res;
3317 tcg_op = tcg_temp_new_i64();
3318 tcg_res = tcg_temp_new_i64();
3319 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3320 gen_helper_rintd(tcg_res, tcg_op, fpst);
3321 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3322 tcg_temp_free_i64(tcg_op);
3323 tcg_temp_free_i64(tcg_res);
3324 } else {
3325 TCGv_i32 tcg_op;
3326 TCGv_i32 tcg_res;
3327 tcg_op = tcg_temp_new_i32();
3328 tcg_res = tcg_temp_new_i32();
3329 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3330 gen_helper_rints(tcg_res, tcg_op, fpst);
3331 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3332 tcg_temp_free_i32(tcg_op);
3333 tcg_temp_free_i32(tcg_res);
3334 }
3335
9b049916 3336 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
7655f39b
WN
3337 tcg_temp_free_i32(tcg_rmode);
3338
3339 tcg_temp_free_ptr(fpst);
e3bb599d 3340 return true;
7655f39b
WN
3341}
3342
c9975a83
WN
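/*
 * VCVT{A,N,P,M}: float-to-integer conversion with the rounding mode taken
 * from the instruction rather than the FPSCR; the current mode is saved,
 * overridden via gen_helper_set_rmode(), and restored afterwards.
 */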
3343static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3344 int rounding)
3345{
3346 bool is_signed = extract32(insn, 7, 1);
3347 TCGv_ptr fpst = get_fpstatus_ptr(0);
3348 TCGv_i32 tcg_rmode, tcg_shift;
3349
3350 tcg_shift = tcg_const_i32(0);
3351
3352 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
9b049916 3353 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
c9975a83
WN
3354
3355 if (dp) {
3356 TCGv_i64 tcg_double, tcg_res;
3357 TCGv_i32 tcg_tmp;
3358 /* Rd is encoded as a single precision register even when the source
3359 * is double precision.
3360 */
3361 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3362 tcg_double = tcg_temp_new_i64();
3363 tcg_res = tcg_temp_new_i64();
3364 tcg_tmp = tcg_temp_new_i32();
3365 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3366 if (is_signed) {
3367 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3368 } else {
3369 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3370 }
ecc7b3aa 3371 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3372 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3373 tcg_temp_free_i32(tcg_tmp);
3374 tcg_temp_free_i64(tcg_res);
3375 tcg_temp_free_i64(tcg_double);
3376 } else {
3377 TCGv_i32 tcg_single, tcg_res;
3378 tcg_single = tcg_temp_new_i32();
3379 tcg_res = tcg_temp_new_i32();
3380 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3381 if (is_signed) {
3382 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3383 } else {
3384 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3385 }
3386 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3387 tcg_temp_free_i32(tcg_res);
3388 tcg_temp_free_i32(tcg_single);
3389 }
3390
9b049916 3391 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
c9975a83
WN
3392 tcg_temp_free_i32(tcg_rmode);
3393
3394 tcg_temp_free_i32(tcg_shift);
3395
3396 tcg_temp_free_ptr(fpst);
3397
3398 return 0;
3399}
7655f39b 3400
c0c760af 3401static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn)
04731fb5 3402{
f65988a1 3403 uint32_t rd, rm, dp = extract32(insn, 8, 1);
04731fb5 3404
04731fb5
WN
3405 if (dp) {
3406 VFP_DREG_D(rd, insn);
04731fb5
WN
3407 VFP_DREG_M(rm, insn);
3408 } else {
3409 rd = VFP_SREG_D(insn);
04731fb5
WN
3410 rm = VFP_SREG_M(insn);
3411 }
3412
e3bb599d
PM
3413 if ((insn & 0x0fbc0e50) == 0x0ebc0a40 &&
3414 dc_isar_feature(aa32_vcvt_dr, s)) {
c9975a83
WN
3415 /* VCVTA, VCVTN, VCVTP, VCVTM */
3416 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3417 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3418 }
3419 return 1;
3420}
3421
06db8196
PM
3422/*
3423 * Disassemble a VFP instruction. Returns nonzero if an error occurred
 3424 * (i.e. an undefined instruction).
3425 */
7dcc1f89 3426static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3427{
3428 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3429 int dp, veclen;
39d5492a
PM
3430 TCGv_i32 addr;
3431 TCGv_i32 tmp;
3432 TCGv_i32 tmp2;
06db8196 3433 bool ignore_vfp_enabled = false;
b7bcbe95 3434
d614a513 3435 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3436 return 1;
d614a513 3437 }
40f137e1 3438
78e138bc
PM
3439 /*
3440 * If the decodetree decoder handles this insn it will always
3441 * emit code to either execute the insn or generate an appropriate
3442 * exception; so we don't need to ever return non-zero to tell
3443 * the calling code to emit an UNDEF exception.
3444 */
3445 if (extract32(insn, 28, 4) == 0xf) {
3446 if (disas_vfp_uncond(s, insn)) {
3447 return 0;
3448 }
3449 } else {
3450 if (disas_vfp(s, insn)) {
3451 return 0;
3452 }
3453 }
3454
06db8196
PM
3455 /*
3456 * FIXME: this access check should not take precedence over UNDEF
2c7ffc41
PM
3457 * for invalid encodings; we will generate incorrect syndrome information
3458 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3459 */
06db8196 3460 if ((insn & 0x0fe00fff) == 0x0ee00a10) {
40f137e1 3461 rn = (insn >> 16) & 0xf;
06db8196
PM
3462 if (rn == ARM_VFP_FPSID || rn == ARM_VFP_FPEXC || rn == ARM_VFP_MVFR2
3463 || rn == ARM_VFP_MVFR1 || rn == ARM_VFP_MVFR0) {
3464 ignore_vfp_enabled = true;
a50c0f51 3465 }
40f137e1 3466 }
06db8196
PM
3467 if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
3468 return 0;
6d60c67a
PM
3469 }
3470
6a57f3eb 3471 if (extract32(insn, 28, 4) == 0xf) {
c0c760af
PM
3472 /*
3473 * Encodings with T=1 (Thumb) or unconditional (ARM):
3474 * only used for the "miscellaneous VFP features" added in v8A
3475 * and v7M (and gated on the MVFR2.FPMisc field).
6a57f3eb 3476 */
c0c760af 3477 return disas_vfp_misc_insn(s, insn);
6a57f3eb
WN
3478 }
3479
b7bcbe95
FB
3480 dp = ((insn & 0xf00) == 0xb00);
3481 switch ((insn >> 24) & 0xf) {
3482 case 0xe:
3483 if (insn & (1 << 4)) {
3484 /* single register transfer */
b7bcbe95
FB
3485 rd = (insn >> 12) & 0xf;
3486 if (dp) {
9ee6e8bb
PB
3487 int size;
3488 int pass;
3489
3490 VFP_DREG_N(rn, insn);
3491 if (insn & 0xf)
b7bcbe95 3492 return 1;
9ee6e8bb 3493 if (insn & 0x00c00060
d614a513 3494 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3495 return 1;
d614a513 3496 }
9ee6e8bb
PB
3497
3498 pass = (insn >> 21) & 1;
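 /* Decode the element size and bit offset of the Neon scalar being accessed. */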
3499 if (insn & (1 << 22)) {
3500 size = 0;
3501 offset = ((insn >> 5) & 3) * 8;
3502 } else if (insn & (1 << 5)) {
3503 size = 1;
3504 offset = (insn & (1 << 6)) ? 16 : 0;
3505 } else {
3506 size = 2;
3507 offset = 0;
3508 }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        int vec_size = pass ? 16 : 8;
                        tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0),
                                             vec_size, vec_size, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                bool is_sysreg;

                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);

                is_sysreg = extract32(insn, 21, 1);

                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    /*
                     * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
                     * Writes to R15 are UNPREDICTABLE; we choose to undef.
                     */
                    if (is_sysreg && (rd == 15 || (rn >> 1) != ARM_VFP_FPSCR)) {
                        return 1;
                    }
                }

                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (is_sysreg) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FPSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
3654 } else {
3655 /* arm->vfp */
ef9aae25 3656 if (is_sysreg) {
40f137e1 3657 rn >>= 1;
b7bcbe95
FB
3658 /* system register */
3659 switch (rn) {
40f137e1 3660 case ARM_VFP_FPSID:
9ee6e8bb
PB
3661 case ARM_VFP_MVFR0:
3662 case ARM_VFP_MVFR1:
b7bcbe95
FB
3663 /* Writes are ignored. */
3664 break;
40f137e1 3665 case ARM_VFP_FPSCR:
e4c1cfa5 3666 tmp = load_reg(s, rd);
4373f3ce 3667 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3668 tcg_temp_free_i32(tmp);
b5ff1b31 3669 gen_lookup_tb(s);
b7bcbe95 3670 break;
40f137e1 3671 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3672 if (IS_USER(s))
3673 return 1;
71b3c3de
JR
3674 /* TODO: VFP subarchitecture support.
3675 * For now, keep the EN bit only */
e4c1cfa5 3676 tmp = load_reg(s, rd);
71b3c3de 3677 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3678 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3679 gen_lookup_tb(s);
3680 break;
3681 case ARM_VFP_FPINST:
3682 case ARM_VFP_FPINST2:
23adb861
PM
3683 if (IS_USER(s)) {
3684 return 1;
3685 }
e4c1cfa5 3686 tmp = load_reg(s, rd);
4373f3ce 3687 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3688 break;
b7bcbe95
FB
3689 default:
3690 return 1;
3691 }
3692 } else {
e4c1cfa5 3693 tmp = load_reg(s, rd);
4373f3ce 3694 gen_vfp_msr(tmp);
b7bcbe95
FB
3695 gen_mov_vreg_F0(0, rn);
3696 }
3697 }
3698 }
        } else {
            /* data processing */
            bool rd_is_dp = dp;
            bool rm_is_dp = dp;
            bool no_output = false;

            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            rn = VFP_SREG_N(insn);

            if (op == 15) {
                /* rn is opcode, encoded as per VFP_SREG_N. */
                switch (rn) {
                case 0x00: /* vmov */
                case 0x01: /* vabs */
                case 0x02: /* vneg */
                case 0x03: /* vsqrt */
                    break;

                case 0x04: /* vcvtb.f64.f16, vcvtb.f32.f16 */
                case 0x05: /* vcvtt.f64.f16, vcvtt.f32.f16 */
                    /*
                     * VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if (dp) {
                        if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
                            return 1;
                        }
                    } else {
                        if (!dc_isar_feature(aa32_fp16_spconv, s)) {
                            return 1;
                        }
                    }
                    rm_is_dp = false;
                    break;
                case 0x06: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                case 0x07: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                    if (dp) {
                        if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
                            return 1;
                        }
                    } else {
                        if (!dc_isar_feature(aa32_fp16_spconv, s)) {
                            return 1;
                        }
                    }
                    rd_is_dp = false;
                    break;

                case 0x08: case 0x0a: /* vcmp, vcmpz */
                case 0x09: case 0x0b: /* vcmpe, vcmpez */
                    no_output = true;
                    break;

                case 0x0c: /* vrintr */
                case 0x0d: /* vrintz */
                case 0x0e: /* vrintx */
                    break;

                case 0x0f: /* vcvt double<->single */
                    rd_is_dp = !dp;
                    break;

                case 0x10: /* vcvt.fxx.u32 */
                case 0x11: /* vcvt.fxx.s32 */
                    rm_is_dp = false;
                    break;
                case 0x18: /* vcvtr.u32.fxx */
                case 0x19: /* vcvtz.u32.fxx */
                case 0x1a: /* vcvtr.s32.fxx */
                case 0x1b: /* vcvtz.s32.fxx */
                    rd_is_dp = false;
                    break;

                case 0x14: /* vcvt fp <-> fixed */
                case 0x15:
                case 0x16:
                case 0x17:
                case 0x1c:
                case 0x1d:
                case 0x1e:
                case 0x1f:
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }
                    /* Immediate frac_bits has same format as SREG_M.  */
                    rm_is_dp = false;
                    break;

                case 0x13: /* vjcvt */
                    if (!dp || !dc_isar_feature(aa32_jscvt, s)) {
                        return 1;
                    }
                    rd_is_dp = false;
                    break;

                default:
                    return 1;
                }
            } else if (dp) {
                /* rn is register number */
                VFP_DREG_N(rn, insn);
            }

            if (rd_is_dp) {
                VFP_DREG_D(rd, insn);
            } else {
                rd = VFP_SREG_D(insn);
            }
            if (rm_is_dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            veclen = s->vec_len;
            if (op == 15 && rn > 3) {
                veclen = 0;
            }

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

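            /*
             * VFP short-vector mode: the register file is divided into
             * banks (four doubles or eight singles each).  An operation
             * whose destination lies in the first bank is scalar; any
             * other destination makes it a vector operation that steps
             * through the bank by the configured stride, with a source
             * in the first bank acting as a per-iteration scalar operand.
             */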
            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 0x08: case 0x09: /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 0x0a: case 0x0b: /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 0x14: /* vcvt fp <-> fixed */
                case 0x15:
                case 0x16:
                case 0x17:
                case 0x1c:
                case 0x1d:
                case 0x1e:
                case 0x1f:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(rm_is_dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

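            /*
             * For a scalar operation this loop runs exactly once; in
             * short-vector mode it runs once per vector element, with
             * the register indices advanced between iterations below.
             */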
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }

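                    /*
                     * Expand the 8-bit immediate (VFPExpandImm): the top
                     * bit gives the sign, the exponent is built from
                     * NOT(imm8<6>) followed by replications of imm8<6>,
                     * and the remaining bits form the top of the fraction.
                     */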
                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(false);
                        TCGv_i32 ahp_mode = get_ahp_flag();
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           fpst, ahp_mode);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           fpst, ahp_mode);
                        }
                        tcg_temp_free_i32(ahp_mode);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(tmp);
                        break;
                    }
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(false);
                        TCGv_i32 ahp = get_ahp_flag();
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           fpst, ahp);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           fpst, ahp);
                        }
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(ahp);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(false);
                        TCGv_i32 ahp = get_ahp_flag();
                        tmp = tcg_temp_new_i32();

                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           fpst, ahp);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           fpst, ahp);
                        }
                        tcg_temp_free_i32(ahp);
                        tcg_temp_free_ptr(fpst);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    }
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(false);
                        TCGv_i32 ahp = get_ahp_flag();
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           fpst, ahp);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           fpst, ahp);
                        }
                        tcg_temp_free_i32(ahp);
                        tcg_temp_free_ptr(fpst);
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    }
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
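                    /*
                     * VRINTZ must round to zero regardless of FPSCR.RMode:
                     * gen_helper_set_rmode returns the old rounding mode in
                     * tcg_rmode, so the second call below restores it.
                     */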
                    case 13: /* vrintz */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp) {
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        } else {
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        }
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 19: /* vjcvt */
                        gen_helper_vjcvt(cpu_F0s, cpu_F0d, cpu_env);
                        break;
                    case 20: /* fshto */
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        g_assert_not_reached();
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result, if any.  */
                if (!no_output) {
                    gen_mov_vreg_F0(rd_is_dp, rd);
                }

                /* break out of the loop if we have finished */
                if (veclen == 0) {
                    break;
                }

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
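            /*
             * VMOV between two core registers and either one double
             * register (dp) or a pair of consecutive single registers.
             */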
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (s->v8m_stackcheck && rn == 13 && w) {
                    /*
                     * Here 'addr' is the lowest address we will store to,
                     * and is either the old SP (if post-increment) or
                     * the new SP (if pre-decrement). For post-increment
                     * where the old value is below the limit and the new
                     * value is above, it is UNKNOWN whether the limit check
                     * triggers; we choose to trigger.
                     */
                    gen_helper_v8m_stackcheck(cpu_env, addr);
                }

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}

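/*
 * Direct block chaining with goto_tb is only safe when the destination
 * is in the same guest page as this TB (or as the instruction after it),
 * because translated code is invalidated at page granularity.
 */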
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}

/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
    }
}

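/*
 * 16 x 16 -> 32 signed multiply as for SMULxy: x and y select the top
 * or bottom halfword of each operand; the product is left in t0.
 */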
static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}

/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality */
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}

/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}

static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /*
         * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
         * (and so we can forbid accesses from EL2 or below). elr_hyp
         * can be accessed also from Hyp mode, so forbid accesses from
         * EL0 or EL1.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
            (s->current_el < 3 && *regno != 17)) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}

static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}

static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}

/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}

/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}

#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  tcg_gen_smax_i32
#define gen_helper_neon_pmax_u32  tcg_gen_umax_i32
#define gen_helper_neon_pmin_s32  tcg_gen_smin_i32
#define gen_helper_neon_pmin_u32  tcg_gen_umin_i32

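/*
 * Invoke the Neon integer helper selected by ((size << 1) | u): the low
 * bit chooses unsigned vs signed, the rest the element size.  The _ENV
 * variant is for helpers that also need cpu_env (e.g. ops that can
 * saturate and so update the QC flag).
 */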
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

9ee6e8bb 4902{
39d5492a 4903 TCGv_i32 tmp;
9ee6e8bb 4904 if (size == 1) {
0fad6efc
PM
4905 tmp = neon_load_reg(reg & 7, reg >> 4);
4906 if (reg & 8) {
dd8fbd78 4907 gen_neon_dup_high16(tmp);
0fad6efc
PM
4908 } else {
4909 gen_neon_dup_low16(tmp);
dd8fbd78 4910 }
0fad6efc
PM
4911 } else {
4912 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4913 }
dd8fbd78 4914 return tmp;
9ee6e8bb
PB
4915}
4916
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qunzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qunzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_unzip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}

4956
d68a6f3a 4957static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4958{
b13708bb
RH
4959 TCGv_ptr pd, pm;
4960
600b828c 4961 if (!q && size == 2) {
d68a6f3a
PM
4962 return 1;
4963 }
b13708bb
RH
4964 pd = vfp_reg_ptr(true, rd);
4965 pm = vfp_reg_ptr(true, rm);
d68a6f3a
PM
4966 if (q) {
4967 switch (size) {
4968 case 0:
b13708bb 4969 gen_helper_neon_qzip8(pd, pm);
d68a6f3a
PM
4970 break;
4971 case 1:
b13708bb 4972 gen_helper_neon_qzip16(pd, pm);
d68a6f3a
PM
4973 break;
4974 case 2:
b13708bb 4975 gen_helper_neon_qzip32(pd, pm);
d68a6f3a
PM
4976 break;
4977 default:
4978 abort();
4979 }
4980 } else {
4981 switch (size) {
4982 case 0:
b13708bb 4983 gen_helper_neon_zip8(pd, pm);
d68a6f3a
PM
4984 break;
4985 case 1:
b13708bb 4986 gen_helper_neon_zip16(pd, pm);
d68a6f3a
PM
4987 break;
4988 default:
4989 abort();
4990 }
4991 }
b13708bb
RH
4992 tcg_temp_free_ptr(pd);
4993 tcg_temp_free_ptr(pm);
d68a6f3a 4994 return 0;
19457615
FN
4995}
4996
39d5492a 4997static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4998{
39d5492a 4999 TCGv_i32 rd, tmp;
19457615 5000
7d1b0095
PM
5001 rd = tcg_temp_new_i32();
5002 tmp = tcg_temp_new_i32();
19457615
FN
5003
5004 tcg_gen_shli_i32(rd, t0, 8);
5005 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
5006 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
5007 tcg_gen_or_i32(rd, rd, tmp);
5008
5009 tcg_gen_shri_i32(t1, t1, 8);
5010 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
5011 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
5012 tcg_gen_or_i32(t1, t1, tmp);
5013 tcg_gen_mov_i32(t0, rd);
5014
7d1b0095
PM
5015 tcg_temp_free_i32(tmp);
5016 tcg_temp_free_i32(rd);
19457615
FN
5017}
5018
39d5492a 5019static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 5020{
39d5492a 5021 TCGv_i32 rd, tmp;
19457615 5022
7d1b0095
PM
5023 rd = tcg_temp_new_i32();
5024 tmp = tcg_temp_new_i32();
19457615
FN
5025
5026 tcg_gen_shli_i32(rd, t0, 16);
5027 tcg_gen_andi_i32(tmp, t1, 0xffff);
5028 tcg_gen_or_i32(rd, rd, tmp);
5029 tcg_gen_shri_i32(t1, t1, 16);
5030 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
5031 tcg_gen_or_i32(t1, t1, tmp);
5032 tcg_gen_mov_i32(t0, rd);
5033
7d1b0095
PM
5034 tcg_temp_free_i32(tmp);
5035 tcg_temp_free_i32(rd);
19457615
FN
5036}
5037
5038
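/*
 * Layouts for the VLDn/VSTn "multiple structures" forms, indexed by the
 * op field of the instruction: how many registers are accessed, the
 * element interleave factor, and the spacing between the registers.
 */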
static struct {
    int nregs;
    int interleave;
    int spacing;
} const neon_ls_element_type[11] = {
    {1, 4, 1},
    {1, 4, 2},
    {4, 1, 1},
    {2, 2, 2},
    {1, 3, 1},
    {1, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {1, 2, 1},
    {1, 2, 2},
    {2, 1, 1}
};

/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int load;
    int n;
    int vec_size;
    int mmu_idx;
    TCGMemOp endian;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    endian = s->be_data;
    mmu_idx = get_mem_index(s);
5094 load = (insn & (1 << 21)) != 0;
ac55d007
RH
5095 endian = s->be_data;
5096 mmu_idx = get_mem_index(s);
9ee6e8bb
PB
5097 if ((insn & (1 << 23)) == 0) {
5098 /* Load store all elements. */
5099 op = (insn >> 8) & 0xf;
5100 size = (insn >> 6) & 3;
84496233 5101 if (op > 10)
9ee6e8bb 5102 return 1;
f2dd89d0
PM
5103 /* Catch UNDEF cases for bad values of align field */
5104 switch (op & 0xc) {
5105 case 4:
5106 if (((insn >> 5) & 1) == 1) {
5107 return 1;
5108 }
5109 break;
5110 case 8:
5111 if (((insn >> 4) & 3) == 3) {
5112 return 1;
5113 }
5114 break;
5115 default:
5116 break;
5117 }
9ee6e8bb
PB
5118 nregs = neon_ls_element_type[op].nregs;
5119 interleave = neon_ls_element_type[op].interleave;
84496233 5120 spacing = neon_ls_element_type[op].spacing;
ac55d007 5121 if (size == 3 && (interleave | spacing) != 1) {
84496233 5122 return 1;
ac55d007 5123 }
e23f12b3
RH
5124 /* For our purposes, bytes are always little-endian. */
5125 if (size == 0) {
5126 endian = MO_LE;
5127 }
5128 /* Consecutive little-endian elements from a single register
5129 * can be promoted to a larger little-endian operation.
5130 */
5131 if (interleave == 1 && endian == MO_LE) {
5132 size = 3;
5133 }
ac55d007 5134 tmp64 = tcg_temp_new_i64();
e318a60b 5135 addr = tcg_temp_new_i32();
ac55d007 5136 tmp2 = tcg_const_i32(1 << size);
dcc65026 5137 load_reg_var(s, addr, rn);
9ee6e8bb 5138 for (reg = 0; reg < nregs; reg++) {
ac55d007
RH
5139 for (n = 0; n < 8 >> size; n++) {
5140 int xs;
5141 for (xs = 0; xs < interleave; xs++) {
5142 int tt = rd + reg + spacing * xs;
5143
5144 if (load) {
5145 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
5146 neon_store_element64(tt, n, size, tmp64);
5147 } else {
5148 neon_load_element64(tmp64, tt, n, size);
5149 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
9ee6e8bb 5150 }
ac55d007 5151 tcg_gen_add_i32(addr, addr, tmp2);
9ee6e8bb
PB
5152 }
5153 }
9ee6e8bb 5154 }
e318a60b 5155 tcg_temp_free_i32(addr);
ac55d007
RH
5156 tcg_temp_free_i32(tmp2);
5157 tcg_temp_free_i64(tmp64);
5158 stride = nregs * interleave * 8;
9ee6e8bb
PB
5159 } else {
5160 size = (insn >> 10) & 3;
5161 if (size == 3) {
5162 /* Load single element to all lanes. */
8e18cde3
PM
5163 int a = (insn >> 4) & 1;
5164 if (!load) {
9ee6e8bb 5165 return 1;
8e18cde3 5166 }
9ee6e8bb
PB
5167 size = (insn >> 6) & 3;
5168 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
5169
5170 if (size == 3) {
5171 if (nregs != 4 || a == 0) {
9ee6e8bb 5172 return 1;
99c475ab 5173 }
8e18cde3
PM
5174 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5175 size = 2;
5176 }
5177 if (nregs == 1 && a == 1 && size == 0) {
5178 return 1;
5179 }
5180 if (nregs == 3 && a == 1) {
5181 return 1;
5182 }
e318a60b 5183 addr = tcg_temp_new_i32();
8e18cde3 5184 load_reg_var(s, addr, rn);
7377c2c9
RH
5185
5186 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
5187 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
5188 */
5189 stride = (insn & (1 << 5)) ? 2 : 1;
5190 vec_size = nregs == 1 ? stride * 8 : 8;
5191
5192 tmp = tcg_temp_new_i32();
5193 for (reg = 0; reg < nregs; reg++) {
5194 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
5195 s->be_data | size);
5196 if ((rd & 1) && vec_size == 16) {
5197 /* We cannot write 16 bytes at once because the
5198 * destination is unaligned.
5199 */
5200 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5201 8, 8, tmp);
5202 tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
5203 neon_reg_offset(rd, 0), 8, 8);
5204 } else {
5205 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5206 vec_size, vec_size, tmp);
8e18cde3 5207 }
7377c2c9
RH
5208 tcg_gen_addi_i32(addr, addr, 1 << size);
5209 rd += stride;
9ee6e8bb 5210 }
7377c2c9 5211 tcg_temp_free_i32(tmp);
e318a60b 5212 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5213 stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            int reg_idx;
            switch (size) {
            case 0:
                reg_idx = (insn >> 5) & 7;
                stride = 1;
                break;
            case 1:
                reg_idx = (insn >> 6) & 3;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                reg_idx = (insn >> 7) & 1;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            tmp = tcg_temp_new_i32();
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                    neon_store_element(rd, reg_idx, size, tmp);
                } else { /* Store */
                    neon_load_element(tmp, rd, reg_idx, size);
                    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            tcg_temp_free_i32(tmp);
            stride = nregs * (1 << size);
        }
    }
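    /*
     * Base register writeback: rm == 15 means no writeback, rm == 13
     * advances rn by the transfer size, and any other rm adds that
     * register's value to rn.
     */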
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}

static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

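/* Note for the two helpers above: CPU_V001 is shorthand (defined
 * earlier in this file) for the argument list "cpu_V0, cpu_V0, cpu_V1",
 * so gen_neon_addl()/gen_neon_subl() operate in place:
 *     cpu_V0 = cpu_V0 (+|-) cpu_V1
 */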
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

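/* Ownership sketch for gen_neon_mull(): for 8/16-bit lanes the mull
 * helpers do not consume their inputs, so a and b are freed above;
 * for 32-bit lanes gen_mul[su]_i64_i32() is assumed to free them
 * itself. Either way the caller must treat a and b as dead afterwards.
 */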
static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}

/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};

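/* Decode sketch (this mirrors the checks done in disas_neon_data_insn()
 * below):
 *
 *     int op   = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1); // bits [11:8,4]
 *     int size = (insn >> 20) & 3;
 *     if ((neon_3r_sizes[op] & (1 << size)) == 0) {
 *         return 1;   // unallocated op/size combination: UNDEF
 *     }
 */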
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};

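/* Decode sketch, assuming the extraction implied by the comment above
 * (bits [17:16,10:7] of the insn form the op field):
 *
 *     int op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
 *     if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
 *         return 1;   // unallocated op/size combination: UNDEF
 *     }
 */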

/* Expand v8.1 simd helper.  */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}

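/* Usage sketch: the VQRDMLAH/VQRDMLSH decode later in this file expands
 * through this helper, e.g.
 *     return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16, q, rd, rn, rm);
 * where q selects a 64- or 128-bit operation (opr_sz of 8 or 16 bytes).
 */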
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_ssra[] = {
    INDEX_op_sari_vec, INDEX_op_add_vec, 0
};

const GVecGen2i ssra_op[4] = {
    { .fni8 = gen_ssra8_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_8 },
    { .fni8 = gen_ssra16_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_16 },
    { .fni4 = gen_ssra32_i32,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_32 },
    { .fni8 = gen_ssra64_i64,
      .fniv = gen_ssra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_ssra,
      .load_dest = true,
      .vece = MO_64 },
};

static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_usra[] = {
    INDEX_op_shri_vec, INDEX_op_add_vec, 0
};

const GVecGen2i usra_op[4] = {
    { .fni8 = gen_usra8_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_8, },
    { .fni8 = gen_usra16_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_16, },
    { .fni4 = gen_usra32_i32,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_32, },
    { .fni8 = gen_usra64_i64,
      .fniv = gen_usra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_64, },
};

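/* Per-lane reference semantics for the two accumulate tables above
 * (explanatory sketch only, not used by the translator):
 *     SSRA: d[i] += (signed)a[i] >> shift;     arithmetic shift
 *     USRA: d[i] += (unsigned)a[i] >> shift;   logical shift
 * load_dest is set because d is both a source and the destination.
 */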
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
        tcg_gen_shri_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };

const GVecGen2i sri_op[4] = {
    { .fni8 = gen_shr8_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_8 },
    { .fni8 = gen_shr16_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_16 },
    { .fni4 = gen_shr32_ins_i32,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_32 },
    { .fni8 = gen_shr64_ins_i64,
      .fniv = gen_shr_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_64 },
};

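/* SRI per-lane reference (sketch): shift right and insert, keeping the
 * destination bits that nothing was shifted into. For 8-bit lanes with
 * shift == 2, mask == 0xff >> 2 == 0x3f, so
 *     d = (d & 0xc0) | ((a >> 2) & 0x3f);
 */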
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
        tcg_gen_shli_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };

const GVecGen2i sli_op[4] = {
    { .fni8 = gen_shl8_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_8 },
    { .fni8 = gen_shl16_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_16 },
    { .fni4 = gen_shl32_ins_i32,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_32 },
    { .fni8 = gen_shl64_ins_i64,
      .fniv = gen_shl_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_64 },
};

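/* SLI per-lane reference (sketch): shift left and insert, preserving
 * the low bits of the destination. For 8-bit lanes with shift == 2,
 * mask == (0xff << 2) & 0xff == 0xfc, so
 *     d = (d & 0x03) | ((a << 2) & 0xfc);
 */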
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}

/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */

static const TCGOpcode vecop_list_mla[] = {
    INDEX_op_mul_vec, INDEX_op_add_vec, 0
};

static const TCGOpcode vecop_list_mls[] = {
    INDEX_op_mul_vec, INDEX_op_sub_vec, 0
};

const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_64 },
};

const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_64 },
};

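/* Per-lane reference for the multiply-accumulate tables above (sketch):
 *     VMLA: d[i] = d[i] + a[i] * b[i];
 *     VMLS: d[i] = d[i] - a[i] * b[i];
 * Note the expanders clobber a as a scratch register for the product.
 */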
/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };

const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_64 },
};

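/* CMTST per-lane reference (sketch): all-ones when the operands share
 * any set bit, e.g. for 8-bit lanes with a == 0x10 and b == 0x30:
 *     d = (a & b) != 0 ? 0xff : 0x00;   // 0x10 & 0x30 == 0x10 -> 0xff
 * The setcond+neg sequence above computes exactly this 0/-1 result.
 */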
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqadd[] = {
    INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_64 },
};

static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqadd[] = {
    INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqsub[] = {
    INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqsub[] = {
    INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_64 },
};

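/* The four saturating expanders above all detect saturation the same
 * way (per-lane sketch):
 *     x = a + b;                 // wrapping result
 *     t = saturating_add(a, b);  // clamped result
 *     sat |= (x != t) ? ~0 : 0;  // cmp_vec NE yields an all-ones lane
 * where "sat" is the sticky QC flag (vfp.qc); write_aofs is set so the
 * gvec machinery knows a third operand is written.
 */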
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions. */

static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int vec_size;
    uint32_t imm;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_ptr ptr1, ptr2, ptr3;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    vec_size = q ? 16 : 8;
    rd_ofs = neon_reg_offset(rd, 0);
    rn_ofs = neon_reg_offset(rn, 0);
    rm_ofs = neon_reg_offset(rm, 0);

    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        switch (op) {
        case NEON_3R_SHA:
            /* The SHA-1/SHA-256 3-register instructions require special
             * treatment here, as their size field is overloaded as an
             * op type selector, and they all consume their input in a
             * single pass.
             */
            if (!q) {
                return 1;
            }
            if (!u) { /* SHA-1 */
                if (!dc_isar_feature(aa32_sha1, s)) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                tmp4 = tcg_const_i32(size);
                gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
                tcg_temp_free_i32(tmp4);
            } else { /* SHA-256 */
                if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                switch (size) {
                case 0:
                    gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
                    break;
                case 1:
                    gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
                    break;
                case 2:
                    gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
                    break;
                }
            }
            tcg_temp_free_ptr(ptr1);
            tcg_temp_free_ptr(ptr2);
            tcg_temp_free_ptr(ptr3);
            return 0;

        case NEON_3R_VPADD_VQRDMLAH:
            if (!u) {
                break;  /* VPADD */
            }
            /* VQRDMLAH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_VFM_VQRDMLSH:
            if (!u) {
                /* VFM, VFMS */
                if (size == 1) {
                    return 1;
                }
                break;
            }
            /* VQRDMLSH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_LOGIC: /* Logic ops.  */
            switch ((u << 2) | size) {
            case 0: /* VAND */
                tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 1: /* VBIC */
                tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
                break;
            case 2: /* VORR */
                tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
                                vec_size, vec_size);
                break;
            case 3: /* VORN */
                tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 4: /* VEOR */
                tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 5: /* VBSL */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
                                    vec_size, vec_size);
                break;
            case 6: /* VBIT */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
                                    vec_size, vec_size);
                break;
            case 7: /* VBIF */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
                                    vec_size, vec_size);
                break;
            }
            return 0;

        case NEON_3R_VADD_VSUB:
            if (u) {
                tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else {
                tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            }
            return 0;

        case NEON_3R_VQADD:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqadd_op : sqadd_op) + size);
            return 0;

        case NEON_3R_VQSUB:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqsub_op : sqsub_op) + size);
            return 0;

        case NEON_3R_VMUL: /* VMUL */
            if (u) {
                /* Polynomial case allows only P8 and is handled below.  */
                if (size != 0) {
                    return 1;
                }
            } else {
                tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                return 0;
            }
            break;

        case NEON_3R_VML: /* VMLA, VMLS */
            tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
                           u ? &mls_op[size] : &mla_op[size]);
            return 0;

        case NEON_3R_VTST_VCEQ:
            if (u) { /* VCEQ */
                tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else { /* VTST */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &cmtst_op[size]);
            }
            return 0;

        case NEON_3R_VCGT:
            tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VCGE:
            tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VMAX:
            if (u) {
                tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        case NEON_3R_VMIN:
            if (u) {
                tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        }

        if (size == 3) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case NEON_3R_VSHL:
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
            }
            break;
        case NEON_3R_VPADD_VQRDMLAH:
        case NEON_3R_VPMAX:
        case NEON_3R_VPMIN:
            pairwise = 1;
            break;
        case NEON_3R_FLOAT_ARITH:
            pairwise = (u && size < 2); /* if VPADD (float) */
            break;
        case NEON_3R_FLOAT_MINMAX:
            pairwise = u; /* if VPMIN/VPMAX (float) */
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u && size) {
                /* no encoding for U=0 C=1x */
                return 1;
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (!u) {
                return 1;
            }
            break;
        case NEON_3R_FLOAT_MISC:
            /* VMAXNM/VMINNM in ARMv8 */
            if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
                return 1;
            }
            break;
        case NEON_3R_VFM_VQRDMLSH:
            if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                return 1;
            }
            break;
        default:
            break;
        }

        if (pairwise && q) {
            /* All the pairwise insns UNDEF if Q is set */
            return 1;
        }

        for (pass = 0; pass < (q ? 4 : 2); pass++) {

            if (pairwise) {
                /* Pairwise.  */
                if (pass < 1) {
                    tmp = neon_load_reg(rn, 0);
                    tmp2 = neon_load_reg(rn, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                }
            } else {
                /* Elementwise.  */
                tmp = neon_load_reg(rn, pass);
                tmp2 = neon_load_reg(rm, pass);
            }
            switch (op) {
            case NEON_3R_VHADD:
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case NEON_3R_VRHADD:
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case NEON_3R_VHSUB:
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case NEON_3R_VSHL:
                GEN_NEON_INTEGER_OP(shl);
                break;
            case NEON_3R_VQSHL:
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case NEON_3R_VRSHL:
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case NEON_3R_VQRSHL:
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case NEON_3R_VABD:
                GEN_NEON_INTEGER_OP(abd);
                break;
            case NEON_3R_VABA:
                GEN_NEON_INTEGER_OP(abd);
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                gen_neon_add(size, tmp, tmp2);
                break;
            case NEON_3R_VMUL:
                /* VMUL.P8; other cases already eliminated.  */
                gen_helper_neon_mul_p8(tmp, tmp, tmp2);
                break;
            case NEON_3R_VPMAX:
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case NEON_3R_VPMIN:
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high.  */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPADD_VQRDMLAH:
                switch (size) {
                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                break;
            case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic.  */
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                switch ((u << 2) | size) {
                case 0: /* VADD */
                case 4: /* VPADD */
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    break;
                case 2: /* VSUB */
                    gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
                    break;
                default:
                    abort();
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MULTIPLY:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                if (!u) {
                    tcg_temp_free_i32(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    if (size == 0) {
                        gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_CMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_ACMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MINMAX:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MISC:
                if (u) {
                    /* VMAXNM/VMINNM */
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    if (size == 0) {
                        gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
                    }
                    tcg_temp_free_ptr(fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                    } else {
                        gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                    }
                }
                break;
            case NEON_3R_VFM_VQRDMLSH:
            {
                /* VFMA, VFMS: fused multiply-add */
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                TCGv_i32 tmp3 = neon_load_reg(rd, pass);
                if (size) {
                    /* VFMS */
                    gen_helper_vfp_negs(tmp, tmp);
                }
                gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
                tcg_temp_free_i32(tmp3);
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            default:
                abort();
            }
            tcg_temp_free_i32(tmp2);

            /* Save the result.  For elementwise operations we can put it
               straight into the destination register.  For pairwise operations
               we have to be careful to avoid clobbering the source operands. */
            if (pairwise && rd == rm) {
                neon_store_scratch(pass, tmp);
            } else {
                neon_store_reg(rd, pass, tmp);
            }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift.  */
                if (op > 7) {
                    return 1;
                }
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                if (q && ((rd | rm) & 1)) {
                    return 1;
                }
                if (!u && (op == 4 || op == 6)) {
                    return 1;
                }
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4) {
                    shift = shift - (1 << (size + 3));
                }

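                /* Worked example (sketch): for size == 0 (8-bit lanes) an
                 * encoded field value of 5 (imm6 == 13) means a right shift
                 * by 8 - 5 = 3. After the subtraction above shift == -3; the
                 * cases below either negate it back or pass it negative to
                 * the variable-shift helpers.
                 */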
                switch (op) {
                case 0: /* VSHR */
                    /* Right shift comes here negative.  */
                    shift = -shift;
                    /* Shifts larger than the element size are architecturally
                     * valid.  Unsigned shifts result in all zeros; signed
                     * shifts in all sign bits.
                     */
                    if (!u) {
                        tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
                                          MIN(shift, (8 << size) - 1),
                                          vec_size, vec_size);
                    } else if (shift >= 8 << size) {
                        tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
                    } else {
                        tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
                                          vec_size, vec_size);
                    }
                    return 0;

                case 1: /* VSRA */
                    /* Right shift comes here negative.  */
                    shift = -shift;
                    /* Shifts larger than the element size are architecturally
                     * valid.  Unsigned shifts result in all zeros; signed
                     * shifts in all sign bits.
                     */
                    if (!u) {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        MIN(shift, (8 << size) - 1),
                                        &ssra_op[size]);
                    } else if (shift >= 8 << size) {
                        /* rd += 0 */
                    } else {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        shift, &usra_op[size]);
                    }
                    return 0;

                case 4: /* VSRI */
                    if (!u) {
                        return 1;
                    }
                    /* Right shift comes here negative.  */
                    shift = -shift;
                    /* Shift out of range leaves destination unchanged.  */
                    if (shift < 8 << size) {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        shift, &sri_op[size]);
                    }
                    return 0;

                case 5: /* VSHL, VSLI */
                    if (u) { /* VSLI */
                        /* Shift out of range leaves destination unchanged.  */
                        if (shift < 8 << size) {
                            tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
                                            vec_size, shift, &sli_op[size]);
                        }
                    } else { /* VSHL */
                        /* Shifts larger than the element size are
                         * architecturally valid and result in zero.
                         */
                        if (shift >= 8 << size) {
                            tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
                        } else {
                            tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
                                              vec_size, vec_size);
                        }
                    }
                    return 0;
                }

                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }

                /* To avoid excessive duplication of ops we implement shift
                 * by immediate using the variable shift operations.
                 */
                imm = dup_const(size, shift);

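                /* e.g. size == 1 (16-bit lanes) and VRSHR by 3: shift is -3
                 * here, so each 16-bit lane of imm holds 0xfffd and the
                 * variable rshl helpers below see a per-lane count of -3,
                 * i.e. a rounding right shift by 3 (sketch of the idea).
                 */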
                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHLU */
                            gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
                                                      cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHL */
                            if (u) {
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            }
                            break;
                        default:
                            g_assert_not_reached();
                        }
                        if (op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1.  */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 6: /* VQSHLU */
                            switch (size) {
                            case 0:
                                gen_helper_neon_qshlu_s8(tmp, cpu_env,
                                                         tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_qshlu_s16(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            case 2:
                                gen_helper_neon_qshlu_s32(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            default:
                                abort();
                            }
                            break;
                        case 7: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        default:
                            g_assert_not_reached();
                        }
                        tcg_temp_free_i32(tmp2);

                        if (op == 3) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                int input_unsigned = (op == 8) ? !u : u;
                if (rm & 1) {
                    return 1;
                }
                shift = shift - (1 << (size + 3));
                size++;
                if (size == 3) {
                    tmp64 = tcg_const_i64(shift);
                    neon_load_reg64(cpu_V0, rm);
                    neon_load_reg64(cpu_V1, rm + 1);
                    for (pass = 0; pass < 2; pass++) {
                        TCGv_i64 in;
                        if (pass == 0) {
                            in = cpu_V0;
                        } else {
                            in = cpu_V1;
                        }
                        if (q) {
                            if (input_unsigned) {
                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
                            }
                        } else {
                            if (input_unsigned) {
                                gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
                            }
                        }
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (size == 1) {
                        imm = (uint16_t)shift;
                        imm |= imm << 16;
                    } else {
                        /* size == 2 */
                        imm = (uint32_t)shift;
                    }
                    tmp2 = tcg_const_i32(imm);
                    tmp4 = neon_load_reg(rm + 1, 0);
                    tmp5 = neon_load_reg(rm + 1, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rm, 0);
                        } else {
                            tmp = tmp4;
                        }
                        gen_neon_shift_narrow(size, tmp, tmp2, q,
                                              input_unsigned);
                        if (pass == 0) {
                            tmp3 = neon_load_reg(rm, 1);
                        } else {
                            tmp3 = tmp5;
                        }
                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                              input_unsigned);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(tmp3);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i32(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL, VMOVL */
                if (q || (rd & 1)) {
                    return 1;
                }
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow
                         * input.  */
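                        /* Worked example (sketch): size == 0, shift == 3.
                         * Each widened 16-bit lane receives the top 3 bits
                         * of the lane to its right, so the low 3 bits of
                         * every lane are cleared: imm == 0x07, imm64 ==
                         * 0x0007000700070007, and we AND with ~imm64 below.
                         */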
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else if (size == 1) {
                                imm = 0xffff >> (16 - shift);
                            } else {
                                /* size == 2 */
                                imm = 0xffffffff >> (32 - shift);
                            }
                            if (size < 2) {
                                imm64 = imm | (((uint64_t)imm) << 32);
                            } else {
                                imm64 = imm;
                            }
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point.  */
                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
                    return 1;
                }
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
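                /* e.g. imm6 == 40: the ARM ARM fracbits value is
                 * 64 - 40 = 24; with the must-be-1 top bit masked the
                 * stored shift is 8, and 32 - 8 == 24 as required.
                 */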
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (!(op & 1)) {
                        if (u)
                            gen_vfp_ulto(0, shift, 1);
                        else
                            gen_vfp_slto(0, shift, 1);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift, 1);
                        else
                            gen_vfp_tosl(0, shift, 1);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
246fa4ac
RH
7245 int invert, reg_ofs, vec_size;
7246
7d80fee5
PM
7247 if (q && (rd & 1)) {
7248 return 1;
7249 }
9ee6e8bb
PB
7250
7251 op = (insn >> 8) & 0xf;
7252 /* One register and immediate. */
7253 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
7254 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
7255 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
7256 * We choose to not special-case this and will behave as if a
7257 * valid constant encoding of 0 had been given.
7258 */
9ee6e8bb
PB
7259 switch (op) {
7260 case 0: case 1:
7261 /* no-op */
7262 break;
7263 case 2: case 3:
7264 imm <<= 8;
7265 break;
7266 case 4: case 5:
7267 imm <<= 16;
7268 break;
7269 case 6: case 7:
7270 imm <<= 24;
7271 break;
7272 case 8: case 9:
7273 imm |= imm << 16;
7274 break;
7275 case 10: case 11:
7276 imm = (imm << 8) | (imm << 24);
7277 break;
7278 case 12:
8e31209e 7279 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
7280 break;
7281 case 13:
7282 imm = (imm << 16) | 0xffff;
7283 break;
7284 case 14:
7285 imm |= (imm << 8) | (imm << 16) | (imm << 24);
246fa4ac 7286 if (invert) {
9ee6e8bb 7287 imm = ~imm;
246fa4ac 7288 }
9ee6e8bb
PB
7289 break;
7290 case 15:
7d80fee5
PM
7291 if (invert) {
7292 return 1;
7293 }
9ee6e8bb
PB
7294 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
7295 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
7296 break;
7297 }
246fa4ac 7298 if (invert) {
9ee6e8bb 7299 imm = ~imm;
246fa4ac 7300 }
9ee6e8bb 7301
246fa4ac
RH
7302 reg_ofs = neon_reg_offset(rd, 0);
7303 vec_size = q ? 16 : 8;
7304
7305 if (op & 1 && op < 12) {
7306 if (invert) {
7307 /* The immediate value has already been inverted,
7308 * so BIC becomes AND.
7309 */
7310 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
7311 vec_size, vec_size);
9ee6e8bb 7312 } else {
246fa4ac
RH
7313 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
7314 vec_size, vec_size);
7315 }
7316 } else {
7317 /* VMOV, VMVN. */
7318 if (op == 14 && invert) {
7319 TCGv_i64 t64 = tcg_temp_new_i64();
7320
7321 for (pass = 0; pass <= q; ++pass) {
7322 uint64_t val = 0;
a5a14945 7323 int n;
246fa4ac
RH
7324
7325 for (n = 0; n < 8; n++) {
7326 if (imm & (1 << (n + pass * 8))) {
7327 val |= 0xffull << (n * 8);
7328 }
9ee6e8bb 7329 }
246fa4ac
RH
7330 tcg_gen_movi_i64(t64, val);
7331 neon_store_reg64(t64, rd + pass);
9ee6e8bb 7332 }
246fa4ac
RH
7333 tcg_temp_free_i64(t64);
7334 } else {
7335 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
9ee6e8bb
PB
7336 }
7337 }
7338 }
e4b3861d 7339 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
7340 if (size != 3) {
7341 op = (insn >> 8) & 0xf;
7342 if ((insn & (1 << 6)) == 0) {
7343 /* Three registers of different lengths. */
7344 int src1_wide;
7345 int src2_wide;
7346 int prewiden;
526d0096
PM
7347 /* undefreq: bit 0 : UNDEF if size == 0
7348 * bit 1 : UNDEF if size == 1
7349 * bit 2 : UNDEF if size == 2
7350 * bit 3 : UNDEF if U == 1
7351 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
7352 */
7353 int undefreq;
7354 /* prewiden, src1_wide, src2_wide, undefreq */
7355 static const int neon_3reg_wide[16][4] = {
7356 {1, 0, 0, 0}, /* VADDL */
7357 {1, 1, 0, 0}, /* VADDW */
7358 {1, 0, 0, 0}, /* VSUBL */
7359 {1, 1, 0, 0}, /* VSUBW */
7360 {0, 1, 1, 0}, /* VADDHN */
7361 {0, 0, 0, 0}, /* VABAL */
7362 {0, 1, 1, 0}, /* VSUBHN */
7363 {0, 0, 0, 0}, /* VABDL */
7364 {0, 0, 0, 0}, /* VMLAL */
526d0096 7365 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 7366 {0, 0, 0, 0}, /* VMLSL */
526d0096 7367 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 7368 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 7369 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 7370 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 7371 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
7372 };
7373
7374 prewiden = neon_3reg_wide[op][0];
7375 src1_wide = neon_3reg_wide[op][1];
7376 src2_wide = neon_3reg_wide[op][2];
695272dc 7377 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 7378
526d0096
PM
7379 if ((undefreq & (1 << size)) ||
7380 ((undefreq & 8) && u)) {
695272dc
PM
7381 return 1;
7382 }
7383 if ((src1_wide && (rn & 1)) ||
7384 (src2_wide && (rm & 1)) ||
7385 (!src2_wide && (rd & 1))) {
ad69471c 7386 return 1;
695272dc 7387 }
ad69471c 7388
4e624eda
PM
7389 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
7390 * outside the loop below as it only performs a single pass.
7391 */
7392 if (op == 14 && size == 2) {
7393 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
7394
962fcbf2 7395 if (!dc_isar_feature(aa32_pmull, s)) {
4e624eda
PM
7396 return 1;
7397 }
7398 tcg_rn = tcg_temp_new_i64();
7399 tcg_rm = tcg_temp_new_i64();
7400 tcg_rd = tcg_temp_new_i64();
7401 neon_load_reg64(tcg_rn, rn);
7402 neon_load_reg64(tcg_rm, rm);
7403 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
7404 neon_store_reg64(tcg_rd, rd);
7405 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
7406 neon_store_reg64(tcg_rd, rd + 1);
7407 tcg_temp_free_i64(tcg_rn);
7408 tcg_temp_free_i64(tcg_rm);
7409 tcg_temp_free_i64(tcg_rd);
7410 return 0;
7411 }
7412
9ee6e8bb
PB
7413 /* Avoid overlapping operands. Wide source operands are
7414 always aligned so will never overlap with wide
7415 destinations in problematic ways. */
8f8e3aa4 7416 if (rd == rm && !src2_wide) {
dd8fbd78
FN
7417 tmp = neon_load_reg(rm, 1);
7418 neon_store_scratch(2, tmp);
8f8e3aa4 7419 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
7420 tmp = neon_load_reg(rn, 1);
7421 neon_store_scratch(2, tmp);
9ee6e8bb 7422 }
f764718d 7423 tmp3 = NULL;
9ee6e8bb 7424 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7425 if (src1_wide) {
7426 neon_load_reg64(cpu_V0, rn + pass);
f764718d 7427 tmp = NULL;
9ee6e8bb 7428 } else {
ad69471c 7429 if (pass == 1 && rd == rn) {
dd8fbd78 7430 tmp = neon_load_scratch(2);
9ee6e8bb 7431 } else {
ad69471c
PB
7432 tmp = neon_load_reg(rn, pass);
7433 }
7434 if (prewiden) {
7435 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
7436 }
7437 }
ad69471c
PB
7438 if (src2_wide) {
7439 neon_load_reg64(cpu_V1, rm + pass);
f764718d 7440 tmp2 = NULL;
9ee6e8bb 7441 } else {
ad69471c 7442 if (pass == 1 && rd == rm) {
dd8fbd78 7443 tmp2 = neon_load_scratch(2);
9ee6e8bb 7444 } else {
ad69471c
PB
7445 tmp2 = neon_load_reg(rm, pass);
7446 }
7447 if (prewiden) {
7448 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 7449 }
9ee6e8bb
PB
7450 }
7451 switch (op) {
7452 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 7453 gen_neon_addl(size);
9ee6e8bb 7454 break;
79b0e534 7455 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 7456 gen_neon_subl(size);
9ee6e8bb
PB
7457 break;
7458 case 5: case 7: /* VABAL, VABDL */
7459 switch ((size << 1) | u) {
ad69471c
PB
7460 case 0:
7461 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
7462 break;
7463 case 1:
7464 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
7465 break;
7466 case 2:
7467 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
7468 break;
7469 case 3:
7470 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
7471 break;
7472 case 4:
7473 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
7474 break;
7475 case 5:
7476 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
7477 break;
9ee6e8bb
PB
7478 default: abort();
7479 }
7d1b0095
PM
7480 tcg_temp_free_i32(tmp2);
7481 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7482 break;
7483 case 8: case 9: case 10: case 11: case 12: case 13:
7484 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 7485 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
7486 break;
7487 case 14: /* Polynomial VMULL */
e5ca24cb 7488 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
7489 tcg_temp_free_i32(tmp2);
7490 tcg_temp_free_i32(tmp);
e5ca24cb 7491 break;
695272dc
PM
7492 default: /* 15 is RESERVED: caught earlier */
7493 abort();
9ee6e8bb 7494 }
ebcd88ce
PM
7495 if (op == 13) {
7496 /* VQDMULL */
7497 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7498 neon_store_reg64(cpu_V0, rd + pass);
7499 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 7500 /* Accumulate. */
ebcd88ce 7501 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7502 switch (op) {
4dc064e6
PM
7503 case 10: /* VMLSL */
7504 gen_neon_negl(cpu_V0, size);
7505 /* Fall through */
7506 case 5: case 8: /* VABAL, VMLAL */
ad69471c 7507 gen_neon_addl(size);
9ee6e8bb
PB
7508 break;
7509 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 7510 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7511 if (op == 11) {
7512 gen_neon_negl(cpu_V0, size);
7513 }
ad69471c
PB
7514 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
7515 break;
9ee6e8bb
PB
7516 default:
7517 abort();
7518 }
ad69471c 7519 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7520 } else if (op == 4 || op == 6) {
7521 /* Narrowing operation. */
7d1b0095 7522 tmp = tcg_temp_new_i32();
79b0e534 7523 if (!u) {
9ee6e8bb 7524 switch (size) {
ad69471c
PB
7525 case 0:
7526 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
7527 break;
7528 case 1:
7529 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
7530 break;
7531 case 2:
7532 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 7533 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 7534 break;
9ee6e8bb
PB
7535 default: abort();
7536 }
7537 } else {
7538 switch (size) {
ad69471c
PB
7539 case 0:
7540 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
7541 break;
7542 case 1:
7543 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
7544 break;
7545 case 2:
7546 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
7547 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 7548 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 7549 break;
9ee6e8bb
PB
7550 default: abort();
7551 }
7552 }
ad69471c
PB
7553 if (pass == 0) {
7554 tmp3 = tmp;
7555 } else {
7556 neon_store_reg(rd, 0, tmp3);
7557 neon_store_reg(rd, 1, tmp);
7558 }
9ee6e8bb
PB
7559 } else {
7560 /* Write back the result. */
ad69471c 7561 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7562 }
7563 }
7564 } else {
3e3326df
PM
7565 /* Two registers and a scalar. NB that for ops of this form
7566 * the ARM ARM labels bit 24 as Q, but it is in our variable
7567 * 'u', not 'q'.
7568 */
7569 if (size == 0) {
7570 return 1;
7571 }
9ee6e8bb 7572 switch (op) {
9ee6e8bb 7573 case 1: /* Float VMLA scalar */
9ee6e8bb 7574 case 5: /* Floating point VMLS scalar */
9ee6e8bb 7575 case 9: /* Floating point VMUL scalar */
3e3326df
PM
7576 if (size == 1) {
7577 return 1;
7578 }
7579 /* fall through */
7580 case 0: /* Integer VMLA scalar */
7581 case 4: /* Integer VMLS scalar */
7582 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
7583 case 12: /* VQDMULH scalar */
7584 case 13: /* VQRDMULH scalar */
3e3326df
PM
7585 if (u && ((rd | rn) & 1)) {
7586 return 1;
7587 }
dd8fbd78
FN
7588 tmp = neon_get_scalar(size, rm);
7589 neon_store_scratch(0, tmp);
9ee6e8bb 7590 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
7591 tmp = neon_load_scratch(0);
7592 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
7593 if (op == 12) {
7594 if (size == 1) {
02da0b2d 7595 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7596 } else {
02da0b2d 7597 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
7598 }
7599 } else if (op == 13) {
7600 if (size == 1) {
02da0b2d 7601 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7602 } else {
02da0b2d 7603 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
7604 }
7605 } else if (op & 1) {
aa47cfdd
PM
7606 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7607 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
7608 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
7609 } else {
7610 switch (size) {
dd8fbd78
FN
7611 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
7612 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
7613 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 7614 default: abort();
9ee6e8bb
PB
7615 }
7616 }
7d1b0095 7617 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7618 if (op < 8) {
7619 /* Accumulate. */
dd8fbd78 7620 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
7621 switch (op) {
7622 case 0:
dd8fbd78 7623 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
7624 break;
7625 case 1:
aa47cfdd
PM
7626 {
7627 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7628 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
7629 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7630 break;
aa47cfdd 7631 }
9ee6e8bb 7632 case 4:
dd8fbd78 7633 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
7634 break;
7635 case 5:
aa47cfdd
PM
7636 {
7637 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7638 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
7639 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7640 break;
aa47cfdd 7641 }
9ee6e8bb
PB
7642 default:
7643 abort();
7644 }
7d1b0095 7645 tcg_temp_free_i32(tmp2);
9ee6e8bb 7646 }
dd8fbd78 7647 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7648 }
7649 break;
9ee6e8bb 7650 case 3: /* VQDMLAL scalar */
9ee6e8bb 7651 case 7: /* VQDMLSL scalar */
9ee6e8bb 7652 case 11: /* VQDMULL scalar */
3e3326df 7653 if (u == 1) {
ad69471c 7654 return 1;
3e3326df
PM
7655 }
7656 /* fall through */
7657 case 2: /* VMLAL sclar */
7658 case 6: /* VMLSL scalar */
7659 case 10: /* VMULL scalar */
7660 if (rd & 1) {
7661 return 1;
7662 }
dd8fbd78 7663 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
7664 /* We need a copy of tmp2 because gen_neon_mull
7665 * deletes it during pass 0. */
7d1b0095 7666 tmp4 = tcg_temp_new_i32();
c6067f04 7667 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 7668 tmp3 = neon_load_reg(rn, 1);
ad69471c 7669
9ee6e8bb 7670 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7671 if (pass == 0) {
7672 tmp = neon_load_reg(rn, 0);
9ee6e8bb 7673 } else {
dd8fbd78 7674 tmp = tmp3;
c6067f04 7675 tmp2 = tmp4;
9ee6e8bb 7676 }
ad69471c 7677 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
7678 if (op != 11) {
7679 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7680 }
9ee6e8bb 7681 switch (op) {
4dc064e6
PM
7682 case 6:
7683 gen_neon_negl(cpu_V0, size);
7684 /* Fall through */
7685 case 2:
ad69471c 7686 gen_neon_addl(size);
9ee6e8bb
PB
7687 break;
7688 case 3: case 7:
ad69471c 7689 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7690 if (op == 7) {
7691 gen_neon_negl(cpu_V0, size);
7692 }
ad69471c 7693 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
7694 break;
7695 case 10:
7696 /* no-op */
7697 break;
7698 case 11:
ad69471c 7699 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
7700 break;
7701 default:
7702 abort();
7703 }
ad69471c 7704 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 7705 }
61adacc8
RH
7706 break;
7707 case 14: /* VQRDMLAH scalar */
7708 case 15: /* VQRDMLSH scalar */
7709 {
7710 NeonGenThreeOpEnvFn *fn;
dd8fbd78 7711
962fcbf2 7712 if (!dc_isar_feature(aa32_rdm, s)) {
61adacc8
RH
7713 return 1;
7714 }
7715 if (u && ((rd | rn) & 1)) {
7716 return 1;
7717 }
7718 if (op == 14) {
7719 if (size == 1) {
7720 fn = gen_helper_neon_qrdmlah_s16;
7721 } else {
7722 fn = gen_helper_neon_qrdmlah_s32;
7723 }
7724 } else {
7725 if (size == 1) {
7726 fn = gen_helper_neon_qrdmlsh_s16;
7727 } else {
7728 fn = gen_helper_neon_qrdmlsh_s32;
7729 }
7730 }
dd8fbd78 7731
61adacc8
RH
7732 tmp2 = neon_get_scalar(size, rm);
7733 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7734 tmp = neon_load_reg(rn, pass);
7735 tmp3 = neon_load_reg(rd, pass);
7736 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7737 tcg_temp_free_i32(tmp3);
7738 neon_store_reg(rd, pass, tmp);
7739 }
7740 tcg_temp_free_i32(tmp2);
7741 }
9ee6e8bb 7742 break;
61adacc8
RH
7743 default:
7744 g_assert_not_reached();
9ee6e8bb
PB
7745 }
7746 }
7747 } else { /* size == 3 */
7748 if (!u) {
7749 /* Extract. */
9ee6e8bb 7750 imm = (insn >> 8) & 0xf;
ad69471c
PB
7751
7752 if (imm > 7 && !q)
7753 return 1;
7754
52579ea1
PM
7755 if (q && ((rd | rn | rm) & 1)) {
7756 return 1;
7757 }
7758
ad69471c
PB
7759 if (imm == 0) {
7760 neon_load_reg64(cpu_V0, rn);
7761 if (q) {
7762 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 7763 }
ad69471c
PB
7764 } else if (imm == 8) {
7765 neon_load_reg64(cpu_V0, rn + 1);
7766 if (q) {
7767 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7768 }
ad69471c 7769 } else if (q) {
a7812ae4 7770 tmp64 = tcg_temp_new_i64();
ad69471c
PB
7771 if (imm < 8) {
7772 neon_load_reg64(cpu_V0, rn);
a7812ae4 7773 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
7774 } else {
7775 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 7776 neon_load_reg64(tmp64, rm);
ad69471c
PB
7777 }
7778 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 7779 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
7780 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7781 if (imm < 8) {
7782 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7783 } else {
ad69471c
PB
7784 neon_load_reg64(cpu_V1, rm + 1);
7785 imm -= 8;
9ee6e8bb 7786 }
ad69471c 7787 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
7788 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7789 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 7790 tcg_temp_free_i64(tmp64);
ad69471c 7791 } else {
a7812ae4 7792 /* BUGFIX */
ad69471c 7793 neon_load_reg64(cpu_V0, rn);
a7812ae4 7794 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 7795 neon_load_reg64(cpu_V1, rm);
a7812ae4 7796 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
7797 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7798 }
7799 neon_store_reg64(cpu_V0, rd);
7800 if (q) {
7801 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
7802 }
7803 } else if ((insn & (1 << 11)) == 0) {
7804 /* Two register misc. */
7805 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7806 size = (insn >> 18) & 3;
600b828c
PM
7807 /* UNDEF for unknown op values and bad op-size combinations */
7808 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7809 return 1;
7810 }
fe8fcf3d
PM
7811 if (neon_2rm_is_v8_op(op) &&
7812 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7813 return 1;
7814 }
fc2a9b37
PM
7815 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7816 q && ((rm | rd) & 1)) {
7817 return 1;
7818 }
9ee6e8bb 7819 switch (op) {
600b828c 7820 case NEON_2RM_VREV64:
9ee6e8bb 7821 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7822 tmp = neon_load_reg(rm, pass * 2);
7823 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7824 switch (size) {
dd8fbd78
FN
7825 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7826 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7827 case 2: /* no-op */ break;
7828 default: abort();
7829 }
dd8fbd78 7830 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7831 if (size == 2) {
dd8fbd78 7832 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7833 } else {
9ee6e8bb 7834 switch (size) {
dd8fbd78
FN
7835 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7836 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7837 default: abort();
7838 }
dd8fbd78 7839 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7840 }
7841 }
7842 break;
600b828c
PM
7843 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7844 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7845 for (pass = 0; pass < q + 1; pass++) {
7846 tmp = neon_load_reg(rm, pass * 2);
7847 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7848 tmp = neon_load_reg(rm, pass * 2 + 1);
7849 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7850 switch (size) {
7851 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7852 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7853 case 2: tcg_gen_add_i64(CPU_V001); break;
7854 default: abort();
7855 }
600b828c 7856 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7857 /* Accumulate. */
ad69471c
PB
7858 neon_load_reg64(cpu_V1, rd + pass);
7859 gen_neon_addl(size);
9ee6e8bb 7860 }
ad69471c 7861 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7862 }
7863 break;
600b828c 7864 case NEON_2RM_VTRN:
9ee6e8bb 7865 if (size == 2) {
a5a14945 7866 int n;
9ee6e8bb 7867 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7868 tmp = neon_load_reg(rm, n);
7869 tmp2 = neon_load_reg(rd, n + 1);
7870 neon_store_reg(rm, n, tmp2);
7871 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7872 }
7873 } else {
7874 goto elementwise;
7875 }
7876 break;
600b828c 7877 case NEON_2RM_VUZP:
02acedf9 7878 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7879 return 1;
9ee6e8bb
PB
7880 }
7881 break;
600b828c 7882 case NEON_2RM_VZIP:
d68a6f3a 7883 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7884 return 1;
9ee6e8bb
PB
7885 }
7886 break;
600b828c
PM
7887 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7888 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7889 if (rm & 1) {
7890 return 1;
7891 }
f764718d 7892 tmp2 = NULL;
9ee6e8bb 7893 for (pass = 0; pass < 2; pass++) {
ad69471c 7894 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7895 tmp = tcg_temp_new_i32();
600b828c
PM
7896 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7897 tmp, cpu_V0);
ad69471c
PB
7898 if (pass == 0) {
7899 tmp2 = tmp;
7900 } else {
7901 neon_store_reg(rd, 0, tmp2);
7902 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7903 }
9ee6e8bb
PB
7904 }
7905 break;
600b828c 7906 case NEON_2RM_VSHLL:
fc2a9b37 7907 if (q || (rd & 1)) {
9ee6e8bb 7908 return 1;
600b828c 7909 }
ad69471c
PB
7910 tmp = neon_load_reg(rm, 0);
7911 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7912 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7913 if (pass == 1)
7914 tmp = tmp2;
7915 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7916 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7917 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7918 }
7919 break;
600b828c 7920 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
7921 {
7922 TCGv_ptr fpst;
7923 TCGv_i32 ahp;
7924
602f6e42 7925 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
7926 q || (rm & 1)) {
7927 return 1;
7928 }
7d1b0095
PM
7929 tmp = tcg_temp_new_i32();
7930 tmp2 = tcg_temp_new_i32();
486624fc
AB
7931 fpst = get_fpstatus_ptr(true);
7932 ahp = get_ahp_flag();
60011498 7933 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
486624fc 7934 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498 7935 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
486624fc 7936 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7937 tcg_gen_shli_i32(tmp2, tmp2, 16);
7938 tcg_gen_or_i32(tmp2, tmp2, tmp);
7939 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
486624fc 7940 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498
PB
7941 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7942 neon_store_reg(rd, 0, tmp2);
7d1b0095 7943 tmp2 = tcg_temp_new_i32();
486624fc 7944 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7945 tcg_gen_shli_i32(tmp2, tmp2, 16);
7946 tcg_gen_or_i32(tmp2, tmp2, tmp);
7947 neon_store_reg(rd, 1, tmp2);
7d1b0095 7948 tcg_temp_free_i32(tmp);
486624fc
AB
7949 tcg_temp_free_i32(ahp);
7950 tcg_temp_free_ptr(fpst);
60011498 7951 break;
486624fc 7952 }
600b828c 7953 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
7954 {
7955 TCGv_ptr fpst;
7956 TCGv_i32 ahp;
602f6e42 7957 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
7958 q || (rd & 1)) {
7959 return 1;
7960 }
486624fc
AB
7961 fpst = get_fpstatus_ptr(true);
7962 ahp = get_ahp_flag();
7d1b0095 7963 tmp3 = tcg_temp_new_i32();
60011498
PB
7964 tmp = neon_load_reg(rm, 0);
7965 tmp2 = neon_load_reg(rm, 1);
7966 tcg_gen_ext16u_i32(tmp3, tmp);
486624fc 7967 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7968 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7969 tcg_gen_shri_i32(tmp3, tmp, 16);
486624fc 7970 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7971 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7972 tcg_temp_free_i32(tmp);
60011498 7973 tcg_gen_ext16u_i32(tmp3, tmp2);
486624fc 7974 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7975 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7976 tcg_gen_shri_i32(tmp3, tmp2, 16);
486624fc 7977 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7978 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7979 tcg_temp_free_i32(tmp2);
7980 tcg_temp_free_i32(tmp3);
486624fc
AB
7981 tcg_temp_free_i32(ahp);
7982 tcg_temp_free_ptr(fpst);
60011498 7983 break;
486624fc 7984 }
9d935509 7985 case NEON_2RM_AESE: case NEON_2RM_AESMC:
962fcbf2 7986 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
9d935509
AB
7987 return 1;
7988 }
1a66ac61
RH
7989 ptr1 = vfp_reg_ptr(true, rd);
7990 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
7991
7992 /* Bit 6 is the lowest opcode bit; it distinguishes between
7993 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7994 */
7995 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7996
7997 if (op == NEON_2RM_AESE) {
1a66ac61 7998 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 7999 } else {
1a66ac61 8000 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 8001 }
1a66ac61
RH
8002 tcg_temp_free_ptr(ptr1);
8003 tcg_temp_free_ptr(ptr2);
9d935509
AB
8004 tcg_temp_free_i32(tmp3);
8005 break;
f1ecb913 8006 case NEON_2RM_SHA1H:
962fcbf2 8007 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
f1ecb913
AB
8008 return 1;
8009 }
1a66ac61
RH
8010 ptr1 = vfp_reg_ptr(true, rd);
8011 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 8012
1a66ac61 8013 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 8014
1a66ac61
RH
8015 tcg_temp_free_ptr(ptr1);
8016 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
8017 break;
8018 case NEON_2RM_SHA1SU1:
8019 if ((rm | rd) & 1) {
8020 return 1;
8021 }
8022 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
8023 if (q) {
962fcbf2 8024 if (!dc_isar_feature(aa32_sha2, s)) {
f1ecb913
AB
8025 return 1;
8026 }
962fcbf2 8027 } else if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
8028 return 1;
8029 }
1a66ac61
RH
8030 ptr1 = vfp_reg_ptr(true, rd);
8031 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 8032 if (q) {
1a66ac61 8033 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 8034 } else {
1a66ac61 8035 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 8036 }
1a66ac61
RH
8037 tcg_temp_free_ptr(ptr1);
8038 tcg_temp_free_ptr(ptr2);
f1ecb913 8039 break;
4bf940be
RH
8040
8041 case NEON_2RM_VMVN:
8042 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
8043 break;
8044 case NEON_2RM_VNEG:
8045 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
8046 break;
4e027a71
RH
8047 case NEON_2RM_VABS:
8048 tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
8049 break;
4bf940be 8050
9ee6e8bb
PB
8051 default:
8052 elementwise:
8053 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 8054 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
8055 tcg_gen_ld_f32(cpu_F0s, cpu_env,
8056 neon_reg_offset(rm, pass));
f764718d 8057 tmp = NULL;
9ee6e8bb 8058 } else {
dd8fbd78 8059 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
8060 }
8061 switch (op) {
600b828c 8062 case NEON_2RM_VREV32:
9ee6e8bb 8063 switch (size) {
dd8fbd78
FN
8064 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8065 case 1: gen_swap_half(tmp); break;
600b828c 8066 default: abort();
9ee6e8bb
PB
8067 }
8068 break;
600b828c 8069 case NEON_2RM_VREV16:
dd8fbd78 8070 gen_rev16(tmp);
9ee6e8bb 8071 break;
600b828c 8072 case NEON_2RM_VCLS:
9ee6e8bb 8073 switch (size) {
dd8fbd78
FN
8074 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
8075 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
8076 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 8077 default: abort();
9ee6e8bb
PB
8078 }
8079 break;
600b828c 8080 case NEON_2RM_VCLZ:
9ee6e8bb 8081 switch (size) {
dd8fbd78
FN
8082 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
8083 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 8084 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 8085 default: abort();
9ee6e8bb
PB
8086 }
8087 break;
600b828c 8088 case NEON_2RM_VCNT:
dd8fbd78 8089 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 8090 break;
600b828c 8091 case NEON_2RM_VQABS:
9ee6e8bb 8092 switch (size) {
02da0b2d
PM
8093 case 0:
8094 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
8095 break;
8096 case 1:
8097 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
8098 break;
8099 case 2:
8100 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
8101 break;
600b828c 8102 default: abort();
9ee6e8bb
PB
8103 }
8104 break;
600b828c 8105 case NEON_2RM_VQNEG:
9ee6e8bb 8106 switch (size) {
02da0b2d
PM
8107 case 0:
8108 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
8109 break;
8110 case 1:
8111 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
8112 break;
8113 case 2:
8114 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
8115 break;
600b828c 8116 default: abort();
9ee6e8bb
PB
8117 }
8118 break;
600b828c 8119 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 8120 tmp2 = tcg_const_i32(0);
9ee6e8bb 8121 switch(size) {
dd8fbd78
FN
8122 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
8123 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
8124 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 8125 default: abort();
9ee6e8bb 8126 }
39d5492a 8127 tcg_temp_free_i32(tmp2);
600b828c 8128 if (op == NEON_2RM_VCLE0) {
dd8fbd78 8129 tcg_gen_not_i32(tmp, tmp);
600b828c 8130 }
9ee6e8bb 8131 break;
600b828c 8132 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 8133 tmp2 = tcg_const_i32(0);
9ee6e8bb 8134 switch(size) {
dd8fbd78
FN
8135 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
8136 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
8137 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 8138 default: abort();
9ee6e8bb 8139 }
39d5492a 8140 tcg_temp_free_i32(tmp2);
600b828c 8141 if (op == NEON_2RM_VCLT0) {
dd8fbd78 8142 tcg_gen_not_i32(tmp, tmp);
600b828c 8143 }
9ee6e8bb 8144 break;
600b828c 8145 case NEON_2RM_VCEQ0:
dd8fbd78 8146 tmp2 = tcg_const_i32(0);
9ee6e8bb 8147 switch(size) {
dd8fbd78
FN
8148 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
8149 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
8150 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 8151 default: abort();
9ee6e8bb 8152 }
39d5492a 8153 tcg_temp_free_i32(tmp2);
9ee6e8bb 8154 break;
600b828c 8155 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
8156 {
8157 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 8158 tmp2 = tcg_const_i32(0);
aa47cfdd 8159 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 8160 tcg_temp_free_i32(tmp2);
aa47cfdd 8161 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8162 break;
aa47cfdd 8163 }
600b828c 8164 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
8165 {
8166 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 8167 tmp2 = tcg_const_i32(0);
aa47cfdd 8168 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 8169 tcg_temp_free_i32(tmp2);
aa47cfdd 8170 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8171 break;
aa47cfdd 8172 }
600b828c 8173 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
8174 {
8175 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 8176 tmp2 = tcg_const_i32(0);
aa47cfdd 8177 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 8178 tcg_temp_free_i32(tmp2);
aa47cfdd 8179 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8180 break;
aa47cfdd 8181 }
600b828c 8182 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
8183 {
8184 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 8185 tmp2 = tcg_const_i32(0);
aa47cfdd 8186 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 8187 tcg_temp_free_i32(tmp2);
aa47cfdd 8188 tcg_temp_free_ptr(fpstatus);
0e326109 8189 break;
aa47cfdd 8190 }
600b828c 8191 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
8192 {
8193 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 8194 tmp2 = tcg_const_i32(0);
aa47cfdd 8195 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 8196 tcg_temp_free_i32(tmp2);
aa47cfdd 8197 tcg_temp_free_ptr(fpstatus);
0e326109 8198 break;
aa47cfdd 8199 }
600b828c 8200 case NEON_2RM_VABS_F:
4373f3ce 8201 gen_vfp_abs(0);
9ee6e8bb 8202 break;
600b828c 8203 case NEON_2RM_VNEG_F:
4373f3ce 8204 gen_vfp_neg(0);
9ee6e8bb 8205 break;
600b828c 8206 case NEON_2RM_VSWP:
dd8fbd78
FN
8207 tmp2 = neon_load_reg(rd, pass);
8208 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 8209 break;
600b828c 8210 case NEON_2RM_VTRN:
dd8fbd78 8211 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 8212 switch (size) {
dd8fbd78
FN
8213 case 0: gen_neon_trn_u8(tmp, tmp2); break;
8214 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 8215 default: abort();
9ee6e8bb 8216 }
dd8fbd78 8217 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 8218 break;
34f7b0a2
WN
8219 case NEON_2RM_VRINTN:
8220 case NEON_2RM_VRINTA:
8221 case NEON_2RM_VRINTM:
8222 case NEON_2RM_VRINTP:
8223 case NEON_2RM_VRINTZ:
8224 {
8225 TCGv_i32 tcg_rmode;
8226 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8227 int rmode;
8228
8229 if (op == NEON_2RM_VRINTZ) {
8230 rmode = FPROUNDING_ZERO;
8231 } else {
8232 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
8233 }
8234
8235 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8236 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8237 cpu_env);
8238 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
8239 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8240 cpu_env);
8241 tcg_temp_free_ptr(fpstatus);
8242 tcg_temp_free_i32(tcg_rmode);
8243 break;
8244 }
2ce70625
WN
8245 case NEON_2RM_VRINTX:
8246 {
8247 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8248 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
8249 tcg_temp_free_ptr(fpstatus);
8250 break;
8251 }
901ad525
WN
8252 case NEON_2RM_VCVTAU:
8253 case NEON_2RM_VCVTAS:
8254 case NEON_2RM_VCVTNU:
8255 case NEON_2RM_VCVTNS:
8256 case NEON_2RM_VCVTPU:
8257 case NEON_2RM_VCVTPS:
8258 case NEON_2RM_VCVTMU:
8259 case NEON_2RM_VCVTMS:
8260 {
8261 bool is_signed = !extract32(insn, 7, 1);
8262 TCGv_ptr fpst = get_fpstatus_ptr(1);
8263 TCGv_i32 tcg_rmode, tcg_shift;
8264 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
8265
8266 tcg_shift = tcg_const_i32(0);
8267 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8268 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8269 cpu_env);
8270
8271 if (is_signed) {
8272 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
8273 tcg_shift, fpst);
8274 } else {
8275 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
8276 tcg_shift, fpst);
8277 }
8278
8279 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8280 cpu_env);
8281 tcg_temp_free_i32(tcg_rmode);
8282 tcg_temp_free_i32(tcg_shift);
8283 tcg_temp_free_ptr(fpst);
8284 break;
8285 }
600b828c 8286 case NEON_2RM_VRECPE:
b6d4443a
AB
8287 {
8288 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8289 gen_helper_recpe_u32(tmp, tmp, fpstatus);
8290 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8291 break;
b6d4443a 8292 }
600b828c 8293 case NEON_2RM_VRSQRTE:
c2fb418e
AB
8294 {
8295 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8296 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
8297 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8298 break;
c2fb418e 8299 }
600b828c 8300 case NEON_2RM_VRECPE_F:
b6d4443a
AB
8301 {
8302 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8303 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
8304 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8305 break;
b6d4443a 8306 }
600b828c 8307 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
8308 {
8309 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8310 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
8311 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8312 break;
c2fb418e 8313 }
600b828c 8314 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 8315 gen_vfp_sito(0, 1);
9ee6e8bb 8316 break;
600b828c 8317 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 8318 gen_vfp_uito(0, 1);
9ee6e8bb 8319 break;
600b828c 8320 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 8321 gen_vfp_tosiz(0, 1);
9ee6e8bb 8322 break;
600b828c 8323 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 8324 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
8325 break;
8326 default:
600b828c
PM
8327 /* Reserved op values were caught by the
8328 * neon_2rm_sizes[] check earlier.
8329 */
8330 abort();
9ee6e8bb 8331 }
600b828c 8332 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
8333 tcg_gen_st_f32(cpu_F0s, cpu_env,
8334 neon_reg_offset(rd, pass));
9ee6e8bb 8335 } else {
dd8fbd78 8336 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
8337 }
8338 }
8339 break;
8340 }
8341 } else if ((insn & (1 << 10)) == 0) {
8342 /* VTBL, VTBX. */
56907d77
PM
8343 int n = ((insn >> 8) & 3) + 1;
8344 if ((rn + n) > 32) {
8345 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
8346 * helper function running off the end of the register file.
8347 */
8348 return 1;
8349 }
8350 n <<= 3;
9ee6e8bb 8351 if (insn & (1 << 6)) {
8f8e3aa4 8352 tmp = neon_load_reg(rd, 0);
9ee6e8bb 8353 } else {
7d1b0095 8354 tmp = tcg_temp_new_i32();
8f8e3aa4 8355 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8356 }
8f8e3aa4 8357 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 8358 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 8359 tmp5 = tcg_const_i32(n);
e7c06c4e 8360 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 8361 tcg_temp_free_i32(tmp);
9ee6e8bb 8362 if (insn & (1 << 6)) {
8f8e3aa4 8363 tmp = neon_load_reg(rd, 1);
9ee6e8bb 8364 } else {
7d1b0095 8365 tmp = tcg_temp_new_i32();
8f8e3aa4 8366 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8367 }
8f8e3aa4 8368 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 8369 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 8370 tcg_temp_free_i32(tmp5);
e7c06c4e 8371 tcg_temp_free_ptr(ptr1);
8f8e3aa4 8372 neon_store_reg(rd, 0, tmp2);
3018f259 8373 neon_store_reg(rd, 1, tmp3);
7d1b0095 8374 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8375 } else if ((insn & 0x380) == 0) {
8376 /* VDUP */
32f91fb7
RH
8377 int element;
8378 TCGMemOp size;
8379
133da6aa
JR
8380 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
8381 return 1;
8382 }
9ee6e8bb 8383 if (insn & (1 << 16)) {
32f91fb7
RH
8384 size = MO_8;
8385 element = (insn >> 17) & 7;
9ee6e8bb 8386 } else if (insn & (1 << 17)) {
32f91fb7
RH
8387 size = MO_16;
8388 element = (insn >> 18) & 3;
8389 } else {
8390 size = MO_32;
8391 element = (insn >> 19) & 1;
9ee6e8bb 8392 }
32f91fb7
RH
8393 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
8394 neon_element_offset(rm, element, size),
8395 q ? 16 : 8, q ? 16 : 8);
9ee6e8bb
PB
8396 } else {
8397 return 1;
8398 }
8399 }
8400 }
8401 return 0;
8402}
8403
8b7209fa
RH
8404/* Advanced SIMD three registers of the same length extension.
8405 * 31 25 23 22 20 16 12 11 10 9 8 3 0
8406 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8407 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8408 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8409 */
8410static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
8411{
26c470a7
RH
8412 gen_helper_gvec_3 *fn_gvec = NULL;
8413 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
8414 int rd, rn, rm, opr_sz;
8415 int data = 0;
87732318
RH
8416 int off_rn, off_rm;
8417 bool is_long = false, q = extract32(insn, 6, 1);
8418 bool ptr_is_env = false;
8b7209fa
RH
8419
8420 if ((insn & 0xfe200f10) == 0xfc200800) {
8421 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
8422 int size = extract32(insn, 20, 1);
8423 data = extract32(insn, 23, 2); /* rot */
962fcbf2 8424 if (!dc_isar_feature(aa32_vcma, s)
5763190f 8425 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
8426 return 1;
8427 }
8428 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
8429 } else if ((insn & 0xfea00f10) == 0xfc800800) {
8430 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
8431 int size = extract32(insn, 20, 1);
8432 data = extract32(insn, 24, 1); /* rot */
962fcbf2 8433 if (!dc_isar_feature(aa32_vcma, s)
5763190f 8434 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
8435 return 1;
8436 }
8437 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
8438 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
8439 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
8440 bool u = extract32(insn, 4, 1);
962fcbf2 8441 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
8442 return 1;
8443 }
8444 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
87732318
RH
8445 } else if ((insn & 0xff300f10) == 0xfc200810) {
8446 /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
8447 int is_s = extract32(insn, 23, 1);
8448 if (!dc_isar_feature(aa32_fhm, s)) {
8449 return 1;
8450 }
8451 is_long = true;
8452 data = is_s; /* is_2 == 0 */
8453 fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
8454 ptr_is_env = true;
8b7209fa
RH
8455 } else {
8456 return 1;
8457 }
8458
87732318
RH
8459 VFP_DREG_D(rd, insn);
8460 if (rd & q) {
8461 return 1;
8462 }
8463 if (q || !is_long) {
8464 VFP_DREG_N(rn, insn);
8465 VFP_DREG_M(rm, insn);
8466 if ((rn | rm) & q & !is_long) {
8467 return 1;
8468 }
8469 off_rn = vfp_reg_offset(1, rn);
8470 off_rm = vfp_reg_offset(1, rm);
8471 } else {
8472 rn = VFP_SREG_N(insn);
8473 rm = VFP_SREG_M(insn);
8474 off_rn = vfp_reg_offset(0, rn);
8475 off_rm = vfp_reg_offset(0, rm);
8476 }
8477
8b7209fa
RH
8478 if (s->fp_excp_el) {
8479 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 8480 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8b7209fa
RH
8481 return 0;
8482 }
8483 if (!s->vfp_enabled) {
8484 return 1;
8485 }
8486
8487 opr_sz = (1 + q) * 8;
26c470a7 8488 if (fn_gvec_ptr) {
87732318
RH
8489 TCGv_ptr ptr;
8490 if (ptr_is_env) {
8491 ptr = cpu_env;
8492 } else {
8493 ptr = get_fpstatus_ptr(1);
8494 }
8495 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 8496 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
8497 if (!ptr_is_env) {
8498 tcg_temp_free_ptr(ptr);
8499 }
26c470a7 8500 } else {
87732318 8501 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
8502 opr_sz, opr_sz, data, fn_gvec);
8503 }
8b7209fa
RH
8504 return 0;
8505}
8506
638808ff
RH
8507/* Advanced SIMD two registers and a scalar extension.
8508 * 31 24 23 22 20 16 12 11 10 9 8 3 0
8509 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8510 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8511 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8512 *
8513 */
8514
8515static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
8516{
26c470a7
RH
8517 gen_helper_gvec_3 *fn_gvec = NULL;
8518 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
2cc99919 8519 int rd, rn, rm, opr_sz, data;
87732318
RH
8520 int off_rn, off_rm;
8521 bool is_long = false, q = extract32(insn, 6, 1);
8522 bool ptr_is_env = false;
638808ff
RH
8523
8524 if ((insn & 0xff000f10) == 0xfe000800) {
8525 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
2cc99919
RH
8526 int rot = extract32(insn, 20, 2);
8527 int size = extract32(insn, 23, 1);
8528 int index;
8529
962fcbf2 8530 if (!dc_isar_feature(aa32_vcma, s)) {
638808ff
RH
8531 return 1;
8532 }
2cc99919 8533 if (size == 0) {
5763190f 8534 if (!dc_isar_feature(aa32_fp16_arith, s)) {
2cc99919
RH
8535 return 1;
8536 }
8537 /* For fp16, rm is just Vm, and index is M. */
8538 rm = extract32(insn, 0, 4);
8539 index = extract32(insn, 5, 1);
8540 } else {
8541 /* For fp32, rm is the usual M:Vm, and index is 0. */
8542 VFP_DREG_M(rm, insn);
8543 index = 0;
8544 }
8545 data = (index << 2) | rot;
8546 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
8547 : gen_helper_gvec_fcmlah_idx);
26c470a7
RH
8548 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
8549 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
8550 int u = extract32(insn, 4, 1);
87732318 8551
962fcbf2 8552 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
8553 return 1;
8554 }
8555 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
8556 /* rm is just Vm, and index is M. */
8557 data = extract32(insn, 5, 1); /* index */
8558 rm = extract32(insn, 0, 4);
87732318
RH
8559 } else if ((insn & 0xffa00f10) == 0xfe000810) {
8560 /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
8561 int is_s = extract32(insn, 20, 1);
8562 int vm20 = extract32(insn, 0, 3);
8563 int vm3 = extract32(insn, 3, 1);
8564 int m = extract32(insn, 5, 1);
8565 int index;
8566
8567 if (!dc_isar_feature(aa32_fhm, s)) {
8568 return 1;
8569 }
8570 if (q) {
8571 rm = vm20;
8572 index = m * 2 + vm3;
8573 } else {
8574 rm = vm20 * 2 + m;
8575 index = vm3;
8576 }
8577 is_long = true;
8578 data = (index << 2) | is_s; /* is_2 == 0 */
8579 fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
8580 ptr_is_env = true;
638808ff
RH
8581 } else {
8582 return 1;
8583 }
8584
87732318
RH
8585 VFP_DREG_D(rd, insn);
8586 if (rd & q) {
8587 return 1;
8588 }
8589 if (q || !is_long) {
8590 VFP_DREG_N(rn, insn);
8591 if (rn & q & !is_long) {
8592 return 1;
8593 }
8594 off_rn = vfp_reg_offset(1, rn);
8595 off_rm = vfp_reg_offset(1, rm);
8596 } else {
8597 rn = VFP_SREG_N(insn);
8598 off_rn = vfp_reg_offset(0, rn);
8599 off_rm = vfp_reg_offset(0, rm);
8600 }
638808ff
RH
8601 if (s->fp_excp_el) {
8602 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 8603 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
638808ff
RH
8604 return 0;
8605 }
8606 if (!s->vfp_enabled) {
8607 return 1;
8608 }
8609
8610 opr_sz = (1 + q) * 8;
26c470a7 8611 if (fn_gvec_ptr) {
87732318
RH
8612 TCGv_ptr ptr;
8613 if (ptr_is_env) {
8614 ptr = cpu_env;
8615 } else {
8616 ptr = get_fpstatus_ptr(1);
8617 }
8618 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 8619 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
8620 if (!ptr_is_env) {
8621 tcg_temp_free_ptr(ptr);
8622 }
26c470a7 8623 } else {
87732318 8624 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
8625 opr_sz, opr_sz, data, fn_gvec);
8626 }
638808ff
RH
8627 return 0;
8628}
8629
7dcc1f89 8630static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 8631{
4b6a83fb
PM
8632 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
8633 const ARMCPRegInfo *ri;
9ee6e8bb
PB
8634
8635 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
8636
8637 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 8638 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
8639 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
8640 return 1;
8641 }
d614a513 8642 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 8643 return disas_iwmmxt_insn(s, insn);
d614a513 8644 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 8645 return disas_dsp_insn(s, insn);
c0f4af17
PM
8646 }
8647 return 1;
4b6a83fb
PM
8648 }
8649
8650 /* Otherwise treat as a generic register access */
8651 is64 = (insn & (1 << 25)) == 0;
8652 if (!is64 && ((insn & (1 << 4)) == 0)) {
8653 /* cdp */
8654 return 1;
8655 }
8656
8657 crm = insn & 0xf;
8658 if (is64) {
8659 crn = 0;
8660 opc1 = (insn >> 4) & 0xf;
8661 opc2 = 0;
8662 rt2 = (insn >> 16) & 0xf;
8663 } else {
8664 crn = (insn >> 16) & 0xf;
8665 opc1 = (insn >> 21) & 7;
8666 opc2 = (insn >> 5) & 7;
8667 rt2 = 0;
8668 }
8669 isread = (insn >> 20) & 1;
8670 rt = (insn >> 12) & 0xf;
8671
60322b39 8672 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 8673 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
8674 if (ri) {
8675 /* Check access permissions */
dcbff19b 8676 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
8677 return 1;
8678 }
8679
c0f4af17 8680 if (ri->accessfn ||
d614a513 8681 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
8682 /* Emit code to perform further access permissions checks at
8683 * runtime; this may result in an exception.
c0f4af17
PM
8684 * Note that on XScale all cp0..c13 registers do an access check
8685 * call in order to handle c15_cpar.
f59df3f2
PM
8686 */
8687 TCGv_ptr tmpptr;
3f208fd7 8688 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
8689 uint32_t syndrome;
8690
8691 /* Note that since we are an implementation which takes an
8692 * exception on a trapped conditional instruction only if the
8693 * instruction passes its condition code check, we can take
8694 * advantage of the clause in the ARM ARM that allows us to set
8695 * the COND field in the instruction to 0xE in all cases.
8696 * We could fish the actual condition out of the insn (ARM)
8697 * or the condexec bits (Thumb) but it isn't necessary.
8698 */
8699 switch (cpnum) {
8700 case 14:
8701 if (is64) {
8702 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8703 isread, false);
8bcbf37c
PM
8704 } else {
8705 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8706 rt, isread, false);
8bcbf37c
PM
8707 }
8708 break;
8709 case 15:
8710 if (is64) {
8711 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8712 isread, false);
8bcbf37c
PM
8713 } else {
8714 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8715 rt, isread, false);
8bcbf37c
PM
8716 }
8717 break;
8718 default:
8719 /* ARMv8 defines that only coprocessors 14 and 15 exist,
8720 * so this can only happen if this is an ARMv7 or earlier CPU,
8721 * in which case the syndrome information won't actually be
8722 * guest visible.
8723 */
d614a513 8724 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
8725 syndrome = syn_uncategorized();
8726 break;
8727 }
8728
43bfa4a1 8729 gen_set_condexec(s);
3977ee5d 8730 gen_set_pc_im(s, s->pc - 4);
f59df3f2 8731 tmpptr = tcg_const_ptr(ri);
8bcbf37c 8732 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
8733 tcg_isread = tcg_const_i32(isread);
8734 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
8735 tcg_isread);
f59df3f2 8736 tcg_temp_free_ptr(tmpptr);
8bcbf37c 8737 tcg_temp_free_i32(tcg_syn);
3f208fd7 8738 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
8739 }
8740
4b6a83fb
PM
8741 /* Handle special cases first */
8742 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
8743 case ARM_CP_NOP:
8744 return 0;
8745 case ARM_CP_WFI:
8746 if (isread) {
8747 return 1;
8748 }
eaed129d 8749 gen_set_pc_im(s, s->pc);
dcba3a8d 8750 s->base.is_jmp = DISAS_WFI;
2bee5105 8751 return 0;
4b6a83fb
PM
8752 default:
8753 break;
8754 }
8755
c5a49c63 8756 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8757 gen_io_start();
8758 }
8759
4b6a83fb
PM
8760 if (isread) {
8761 /* Read */
8762 if (is64) {
8763 TCGv_i64 tmp64;
8764 TCGv_i32 tmp;
8765 if (ri->type & ARM_CP_CONST) {
8766 tmp64 = tcg_const_i64(ri->resetvalue);
8767 } else if (ri->readfn) {
8768 TCGv_ptr tmpptr;
4b6a83fb
PM
8769 tmp64 = tcg_temp_new_i64();
8770 tmpptr = tcg_const_ptr(ri);
8771 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8772 tcg_temp_free_ptr(tmpptr);
8773 } else {
8774 tmp64 = tcg_temp_new_i64();
8775 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8776 }
8777 tmp = tcg_temp_new_i32();
ecc7b3aa 8778 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
8779 store_reg(s, rt, tmp);
8780 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 8781 tmp = tcg_temp_new_i32();
ecc7b3aa 8782 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 8783 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
8784 store_reg(s, rt2, tmp);
8785 } else {
39d5492a 8786 TCGv_i32 tmp;
4b6a83fb
PM
8787 if (ri->type & ARM_CP_CONST) {
8788 tmp = tcg_const_i32(ri->resetvalue);
8789 } else if (ri->readfn) {
8790 TCGv_ptr tmpptr;
4b6a83fb
PM
8791 tmp = tcg_temp_new_i32();
8792 tmpptr = tcg_const_ptr(ri);
8793 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8794 tcg_temp_free_ptr(tmpptr);
8795 } else {
8796 tmp = load_cpu_offset(ri->fieldoffset);
8797 }
8798 if (rt == 15) {
8799 /* Destination register of r15 for 32 bit loads sets
8800 * the condition codes from the high 4 bits of the value
8801 */
8802 gen_set_nzcv(tmp);
8803 tcg_temp_free_i32(tmp);
8804 } else {
8805 store_reg(s, rt, tmp);
8806 }
8807 }
8808 } else {
8809 /* Write */
8810 if (ri->type & ARM_CP_CONST) {
8811 /* If not forbidden by access permissions, treat as WI */
8812 return 0;
8813 }
8814
8815 if (is64) {
39d5492a 8816 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
8817 TCGv_i64 tmp64 = tcg_temp_new_i64();
8818 tmplo = load_reg(s, rt);
8819 tmphi = load_reg(s, rt2);
8820 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8821 tcg_temp_free_i32(tmplo);
8822 tcg_temp_free_i32(tmphi);
8823 if (ri->writefn) {
8824 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
8825 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8826 tcg_temp_free_ptr(tmpptr);
8827 } else {
8828 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8829 }
8830 tcg_temp_free_i64(tmp64);
8831 } else {
8832 if (ri->writefn) {
39d5492a 8833 TCGv_i32 tmp;
4b6a83fb 8834 TCGv_ptr tmpptr;
4b6a83fb
PM
8835 tmp = load_reg(s, rt);
8836 tmpptr = tcg_const_ptr(ri);
8837 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8838 tcg_temp_free_ptr(tmpptr);
8839 tcg_temp_free_i32(tmp);
8840 } else {
39d5492a 8841 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
8842 store_cpu_offset(tmp, ri->fieldoffset);
8843 }
8844 }
2452731c
PM
8845 }
8846
c5a49c63 8847 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8848 /* I/O operations must end the TB here (whether read or write) */
8849 gen_io_end();
8850 gen_lookup_tb(s);
8851 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
8852 /* We default to ending the TB on a coprocessor register write,
8853 * but allow this to be suppressed by the register definition
8854 * (usually only necessary to work around guest bugs).
8855 */
2452731c 8856 gen_lookup_tb(s);
4b6a83fb 8857 }
2452731c 8858
4b6a83fb
PM
8859 return 0;
8860 }
8861
626187d8
PM
8862 /* Unknown register; this might be a guest error or a QEMU
8863 * unimplemented feature.
8864 */
8865 if (is64) {
8866 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8867 "64 bit system register cp:%d opc1: %d crm:%d "
8868 "(%s)\n",
8869 isread ? "read" : "write", cpnum, opc1, crm,
8870 s->ns ? "non-secure" : "secure");
626187d8
PM
8871 } else {
8872 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8873 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8874 "(%s)\n",
8875 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8876 s->ns ? "non-secure" : "secure");
626187d8
PM
8877 }
8878
4a9a539f 8879 return 1;
9ee6e8bb
PB
8880}
8881
5e3f878a
PB
8882
8883/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 8884static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 8885{
39d5492a 8886 TCGv_i32 tmp;
7d1b0095 8887 tmp = tcg_temp_new_i32();
ecc7b3aa 8888 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 8889 store_reg(s, rlow, tmp);
7d1b0095 8890 tmp = tcg_temp_new_i32();
5e3f878a 8891 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 8892 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
8893 store_reg(s, rhigh, tmp);
8894}
8895
8896/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 8897static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 8898{
a7812ae4 8899 TCGv_i64 tmp;
39d5492a 8900 TCGv_i32 tmp2;
5e3f878a 8901
36aa55dc 8902 /* Load value and extend to 64 bits. */
a7812ae4 8903 tmp = tcg_temp_new_i64();
5e3f878a
PB
8904 tmp2 = load_reg(s, rlow);
8905 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 8906 tcg_temp_free_i32(tmp2);
5e3f878a 8907 tcg_gen_add_i64(val, val, tmp);
b75263d6 8908 tcg_temp_free_i64(tmp);
5e3f878a
PB
8909}
8910
8911/* load and add a 64-bit value from a register pair. */
a7812ae4 8912static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 8913{
a7812ae4 8914 TCGv_i64 tmp;
39d5492a
PM
8915 TCGv_i32 tmpl;
8916 TCGv_i32 tmph;
5e3f878a
PB
8917
8918 /* Load 64-bit value rd:rn. */
36aa55dc
PB
8919 tmpl = load_reg(s, rlow);
8920 tmph = load_reg(s, rhigh);
a7812ae4 8921 tmp = tcg_temp_new_i64();
36aa55dc 8922 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
8923 tcg_temp_free_i32(tmpl);
8924 tcg_temp_free_i32(tmph);
5e3f878a 8925 tcg_gen_add_i64(val, val, tmp);
b75263d6 8926 tcg_temp_free_i64(tmp);
5e3f878a
PB
8927}
8928
c9f10124 8929/* Set N and Z flags from hi|lo. */
39d5492a 8930static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 8931{
c9f10124
RH
8932 tcg_gen_mov_i32(cpu_NF, hi);
8933 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
8934}
8935
426f5abc
PB
8936/* Load/Store exclusive instructions are implemented by remembering
8937 the value/address loaded, and seeing if these are the same
354161b3 8938 when the store is performed. This should be sufficient to implement
426f5abc 8939 the architecturally mandated semantics, and avoids having to monitor
354161b3
EC
8940 regular stores. The compare vs the remembered value is done during
8941 the cmpxchg operation, but we must compare the addresses manually. */
426f5abc 8942static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 8943 TCGv_i32 addr, int size)
426f5abc 8944{
94ee24e7 8945 TCGv_i32 tmp = tcg_temp_new_i32();
354161b3 8946 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 8947
50225ad0
PM
8948 s->is_ldex = true;
8949
426f5abc 8950 if (size == 3) {
39d5492a 8951 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 8952 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 8953
3448d47b
PM
8954 /* For AArch32, architecturally the 32-bit word at the lowest
8955 * address is always Rt and the one at addr+4 is Rt2, even if
8956 * the CPU is big-endian. That means we don't want to do a
8957 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8958 * for an architecturally 64-bit access, but instead do a
8959 * 64-bit access using MO_BE if appropriate and then split
8960 * the two halves.
8961 * This only makes a difference for BE32 user-mode, where
8962 * frob64() must not flip the two halves of the 64-bit data
8963 * but this code must treat BE32 user-mode like BE32 system.
8964 */
8965 TCGv taddr = gen_aa32_addr(s, addr, opc);
8966
8967 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8968 tcg_temp_free(taddr);
354161b3 8969 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3448d47b
PM
8970 if (s->be_data == MO_BE) {
8971 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8972 } else {
8973 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8974 }
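/* Illustrative example: with t64 = 0x1122334455667788 the
 * little-endian split gives Rt = 0x55667788 and Rt2 = 0x11223344;
 * under MO_BE the extract is swapped so that the word loaded from
 * the lower address still ends up in Rt. */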
354161b3
EC
8975 tcg_temp_free_i64(t64);
8976
8977 store_reg(s, rt2, tmp2);
03d05e2d 8978 } else {
354161b3 8979 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 8980 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 8981 }
03d05e2d
PM
8982
8983 store_reg(s, rt, tmp);
8984 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
8985}
8986
8987static void gen_clrex(DisasContext *s)
8988{
03d05e2d 8989 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
8990}
8991
426f5abc 8992static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 8993 TCGv_i32 addr, int size)
426f5abc 8994{
354161b3
EC
8995 TCGv_i32 t0, t1, t2;
8996 TCGv_i64 extaddr;
8997 TCGv taddr;
42a268c2
RH
8998 TCGLabel *done_label;
8999 TCGLabel *fail_label;
354161b3 9000 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
9001
9002 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
9003 [addr] = {Rt};
9004 {Rd} = 0;
9005 } else {
9006 {Rd} = 1;
9007 } */
9008 fail_label = gen_new_label();
9009 done_label = gen_new_label();
03d05e2d
PM
9010 extaddr = tcg_temp_new_i64();
9011 tcg_gen_extu_i32_i64(extaddr, addr);
9012 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
9013 tcg_temp_free_i64(extaddr);
9014
354161b3
EC
9015 taddr = gen_aa32_addr(s, addr, opc);
9016 t0 = tcg_temp_new_i32();
9017 t1 = load_reg(s, rt);
426f5abc 9018 if (size == 3) {
354161b3
EC
9019 TCGv_i64 o64 = tcg_temp_new_i64();
9020 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 9021
354161b3 9022 t2 = load_reg(s, rt2);
3448d47b
PM
9023 /* For AArch32, architecturally the 32-bit word at the lowest
9024 * address is always Rt and the one at addr+4 is Rt2, even if
9025 * the CPU is big-endian. Since we're going to treat this as a
9026 * single 64-bit BE store, we need to put the two halves in the
9027 * opposite order for BE to LE, so that they end up in the right
9028 * places.
9029 * We don't want gen_aa32_frob64() because that does the wrong
9030 * thing for BE32 usermode.
9031 */
9032 if (s->be_data == MO_BE) {
9033 tcg_gen_concat_i32_i64(n64, t2, t1);
9034 } else {
9035 tcg_gen_concat_i32_i64(n64, t1, t2);
9036 }
354161b3 9037 tcg_temp_free_i32(t2);
03d05e2d 9038
354161b3
EC
9039 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
9040 get_mem_index(s), opc);
9041 tcg_temp_free_i64(n64);
9042
354161b3
EC
9043 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
9044 tcg_gen_extrl_i64_i32(t0, o64);
9045
9046 tcg_temp_free_i64(o64);
9047 } else {
9048 t2 = tcg_temp_new_i32();
9049 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
9050 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
9051 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
9052 tcg_temp_free_i32(t2);
426f5abc 9053 }
354161b3
EC
9054 tcg_temp_free_i32(t1);
9055 tcg_temp_free(taddr);
9056 tcg_gen_mov_i32(cpu_R[rd], t0);
9057 tcg_temp_free_i32(t0);
426f5abc 9058 tcg_gen_br(done_label);
354161b3 9059
426f5abc
PB
9060 gen_set_label(fail_label);
9061 tcg_gen_movi_i32(cpu_R[rd], 1);
9062 gen_set_label(done_label);
03d05e2d 9063 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 9064}
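/*
 * Editorial sketch of the semantics implemented above (not QEMU
 * code): a rough user-space analogue of LDREX/STREX on one 32-bit
 * word, using C11 atomics. ldrex32(), strex32() and the excl_*
 * globals are hypothetical names; real hardware monitors a whole
 * exclusive granule rather than a single word.
 */
#if 0 /* example only */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t *excl_addr;   /* remembered address */
static uint32_t excl_val;             /* remembered value */

static uint32_t ldrex32(_Atomic uint32_t *p)
{
    excl_addr = p;
    excl_val = atomic_load(p);
    return excl_val;
}

/* Returns 0 on success and 1 on failure, matching STREX's Rd. */
static int strex32(_Atomic uint32_t *p, uint32_t newval)
{
    uint32_t expected = excl_val;

    if (p != excl_addr) {             /* the manual address compare */
        return 1;
    }
    return atomic_compare_exchange_strong(p, &expected, newval) ? 0 : 1;
}
#endif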
426f5abc 9065
81465888
PM
9066/* gen_srs:
9067 * @env: CPUARMState
9068 * @s: DisasContext
9069 * @mode: mode field from insn (which stack to store to)
9070 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
9071 * @writeback: true if writeback bit set
9072 *
9073 * Generate code for the SRS (Store Return State) insn.
9074 */
9075static void gen_srs(DisasContext *s,
9076 uint32_t mode, uint32_t amode, bool writeback)
9077{
9078 int32_t offset;
cbc0326b
PM
9079 TCGv_i32 addr, tmp;
9080 bool undef = false;
9081
9082 /* SRS is:
9083 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 9084 * and specified mode is monitor mode
cbc0326b
PM
9085 * - UNDEFINED in Hyp mode
9086 * - UNPREDICTABLE in User or System mode
9087 * - UNPREDICTABLE if the specified mode is:
9088 * -- not implemented
9089 * -- not a valid mode number
9090 * -- a mode that's at a higher exception level
9091 * -- Monitor, if we are Non-secure
f01377f5 9092 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 9093 */
ba63cf47 9094 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
9095 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
9096 return;
9097 }
9098
9099 if (s->current_el == 0 || s->current_el == 2) {
9100 undef = true;
9101 }
9102
9103 switch (mode) {
9104 case ARM_CPU_MODE_USR:
9105 case ARM_CPU_MODE_FIQ:
9106 case ARM_CPU_MODE_IRQ:
9107 case ARM_CPU_MODE_SVC:
9108 case ARM_CPU_MODE_ABT:
9109 case ARM_CPU_MODE_UND:
9110 case ARM_CPU_MODE_SYS:
9111 break;
9112 case ARM_CPU_MODE_HYP:
9113 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
9114 undef = true;
9115 }
9116 break;
9117 case ARM_CPU_MODE_MON:
9118 /* No need to check specifically for "are we non-secure" because
9119 * we've already made EL0 UNDEF and handled the trap for S-EL1;
9120 * so if this isn't EL3 then we must be non-secure.
9121 */
9122 if (s->current_el != 3) {
9123 undef = true;
9124 }
9125 break;
9126 default:
9127 undef = true;
9128 }
9129
9130 if (undef) {
9131 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9132 default_exception_el(s));
9133 return;
9134 }
9135
9136 addr = tcg_temp_new_i32();
9137 tmp = tcg_const_i32(mode);
f01377f5
PM
9138 /* get_r13_banked() will raise an exception if called from System mode */
9139 gen_set_condexec(s);
9140 gen_set_pc_im(s, s->pc - 4);
81465888
PM
9141 gen_helper_get_r13_banked(addr, cpu_env, tmp);
9142 tcg_temp_free_i32(tmp);
9143 switch (amode) {
9144 case 0: /* DA */
9145 offset = -4;
9146 break;
9147 case 1: /* IA */
9148 offset = 0;
9149 break;
9150 case 2: /* DB */
9151 offset = -8;
9152 break;
9153 case 3: /* IB */
9154 offset = 4;
9155 break;
9156 default:
9157 abort();
9158 }
9159 tcg_gen_addi_i32(addr, addr, offset);
9160 tmp = load_reg(s, 14);
12dcc321 9161 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9162 tcg_temp_free_i32(tmp);
81465888
PM
9163 tmp = load_cpu_field(spsr);
9164 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 9165 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9166 tcg_temp_free_i32(tmp);
81465888
PM
9167 if (writeback) {
9168 switch (amode) {
9169 case 0:
9170 offset = -8;
9171 break;
9172 case 1:
9173 offset = 4;
9174 break;
9175 case 2:
9176 offset = -4;
9177 break;
9178 case 3:
9179 offset = 0;
9180 break;
9181 default:
9182 abort();
9183 }
9184 tcg_gen_addi_i32(addr, addr, offset);
9185 tmp = tcg_const_i32(mode);
9186 gen_helper_set_r13_banked(cpu_env, tmp, addr);
9187 tcg_temp_free_i32(tmp);
9188 }
9189 tcg_temp_free_i32(addr);
dcba3a8d 9190 s->base.is_jmp = DISAS_UPDATE;
81465888
PM
9191}
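/*
 * Editorial note: the two amode switches above implement the usual
 * DA/IA/DB/IB rules; relative to the original banked SP, the first
 * of the two stores and the written-back SP land as tabulated below.
 * srs_offsets() is a hypothetical name, not used by the translator.
 */
#if 0 /* example only */
static void srs_offsets(unsigned amode, int *first, int *final)
{
    static const int first_off[4] = { -4, 0, -8, 4 }; /* DA IA DB IB */
    static const int final_off[4] = { -8, 8, -8, 8 };

    *first = first_off[amode & 3];   /* offset of the first store */
    *final = final_off[amode & 3];   /* net SP adjustment on writeback */
}
#endif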
9192
c2d9644e
RK
9193/* Generate a label used for skipping this instruction */
9194static void arm_gen_condlabel(DisasContext *s)
9195{
9196 if (!s->condjmp) {
9197 s->condlabel = gen_new_label();
9198 s->condjmp = 1;
9199 }
9200}
9201
9202/* Skip this instruction if the ARM condition is false */
9203static void arm_skip_unless(DisasContext *s, uint32_t cond)
9204{
9205 arm_gen_condlabel(s);
9206 arm_gen_test_cc(cond ^ 1, s->condlabel);
9207}
9208
f4df2210 9209static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 9210{
f4df2210 9211 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
9212 TCGv_i32 tmp;
9213 TCGv_i32 tmp2;
9214 TCGv_i32 tmp3;
9215 TCGv_i32 addr;
a7812ae4 9216 TCGv_i64 tmp64;
9ee6e8bb 9217
e13886e3
PM
9218 /* M variants do not implement ARM mode; this must raise the INVSTATE
9219 * UsageFault exception.
9220 */
b53d8923 9221 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
9222 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
9223 default_exception_el(s));
9224 return;
b53d8923 9225 }
9ee6e8bb
PB
9226 cond = insn >> 28;
9227 if (cond == 0xf){
be5e7a76
DES
9228 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
9229 * choose to UNDEF. In ARMv5 and above the space is used
9230 * for miscellaneous unconditional instructions.
9231 */
9232 ARCH(5);
9233
9ee6e8bb
PB
9234 /* Unconditional instructions. */
9235 if (((insn >> 25) & 7) == 1) {
9236 /* NEON Data processing. */
d614a513 9237 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 9238 goto illegal_op;
d614a513 9239 }
9ee6e8bb 9240
7dcc1f89 9241 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 9242 goto illegal_op;
7dcc1f89 9243 }
9ee6e8bb
PB
9244 return;
9245 }
9246 if ((insn & 0x0f100000) == 0x04000000) {
9247 /* NEON load/store. */
d614a513 9248 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 9249 goto illegal_op;
d614a513 9250 }
9ee6e8bb 9251
7dcc1f89 9252 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 9253 goto illegal_op;
7dcc1f89 9254 }
9ee6e8bb
PB
9255 return;
9256 }
6a57f3eb
WN
9257 if ((insn & 0x0f000e10) == 0x0e000a00) {
9258 /* VFP. */
7dcc1f89 9259 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9260 goto illegal_op;
9261 }
9262 return;
9263 }
3d185e5d
PM
9264 if (((insn & 0x0f30f000) == 0x0510f000) ||
9265 ((insn & 0x0f30f010) == 0x0710f000)) {
9266 if ((insn & (1 << 22)) == 0) {
9267 /* PLDW; v7MP */
d614a513 9268 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
9269 goto illegal_op;
9270 }
9271 }
9272 /* Otherwise PLD; v5TE+ */
be5e7a76 9273 ARCH(5TE);
3d185e5d
PM
9274 return;
9275 }
9276 if (((insn & 0x0f70f000) == 0x0450f000) ||
9277 ((insn & 0x0f70f010) == 0x0650f000)) {
9278 ARCH(7);
9279 return; /* PLI; V7 */
9280 }
9281 if (((insn & 0x0f700000) == 0x04100000) ||
9282 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 9283 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
9284 goto illegal_op;
9285 }
9286 return; /* v7MP: Unallocated memory hint: must NOP */
9287 }
9288
9289 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
9290 ARCH(6);
9291 /* setend */
9886ecdf
PB
9292 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
9293 gen_helper_setend(cpu_env);
dcba3a8d 9294 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
9295 }
9296 return;
9297 } else if ((insn & 0x0fffff00) == 0x057ff000) {
9298 switch ((insn >> 4) & 0xf) {
9299 case 1: /* clrex */
9300 ARCH(6K);
426f5abc 9301 gen_clrex(s);
9ee6e8bb
PB
9302 return;
9303 case 4: /* dsb */
9304 case 5: /* dmb */
9ee6e8bb 9305 ARCH(7);
61e4c432 9306 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 9307 return;
6df99dec
SS
9308 case 6: /* isb */
9309 /* We need to break the TB after this insn to execute
9310 * self-modifying code correctly and also to take
9311 * any pending interrupts immediately.
9312 */
0b609cc1 9313 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 9314 return;
9888bd1e
RH
9315 case 7: /* sb */
9316 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
9317 goto illegal_op;
9318 }
9319 /*
9320 * TODO: There is no speculation barrier opcode
9321 * for TCG; MB and end the TB instead.
9322 */
9323 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9324 gen_goto_tb(s, 0, s->pc & ~1);
9325 return;
9ee6e8bb
PB
9326 default:
9327 goto illegal_op;
9328 }
9329 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
9330 /* srs */
81465888
PM
9331 ARCH(6);
9332 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 9333 return;
ea825eee 9334 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 9335 /* rfe */
c67b6b71 9336 int32_t offset;
9ee6e8bb
PB
9337 if (IS_USER(s))
9338 goto illegal_op;
9339 ARCH(6);
9340 rn = (insn >> 16) & 0xf;
b0109805 9341 addr = load_reg(s, rn);
9ee6e8bb
PB
9342 i = (insn >> 23) & 3;
9343 switch (i) {
b0109805 9344 case 0: offset = -4; break; /* DA */
c67b6b71
FN
9345 case 1: offset = 0; break; /* IA */
9346 case 2: offset = -8; break; /* DB */
b0109805 9347 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
9348 default: abort();
9349 }
9350 if (offset)
b0109805
PB
9351 tcg_gen_addi_i32(addr, addr, offset);
9352 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 9353 tmp = tcg_temp_new_i32();
12dcc321 9354 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9355 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9356 tmp2 = tcg_temp_new_i32();
12dcc321 9357 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9358 if (insn & (1 << 21)) {
9359 /* Base writeback. */
9360 switch (i) {
b0109805 9361 case 0: offset = -8; break;
c67b6b71
FN
9362 case 1: offset = 4; break;
9363 case 2: offset = -4; break;
b0109805 9364 case 3: offset = 0; break;
9ee6e8bb
PB
9365 default: abort();
9366 }
9367 if (offset)
b0109805
PB
9368 tcg_gen_addi_i32(addr, addr, offset);
9369 store_reg(s, rn, addr);
9370 } else {
7d1b0095 9371 tcg_temp_free_i32(addr);
9ee6e8bb 9372 }
b0109805 9373 gen_rfe(s, tmp, tmp2);
c67b6b71 9374 return;
9ee6e8bb
PB
9375 } else if ((insn & 0x0e000000) == 0x0a000000) {
9376 /* branch link and change to thumb (blx <offset>) */
9377 int32_t offset;
9378
9379 val = (uint32_t)s->pc;
7d1b0095 9380 tmp = tcg_temp_new_i32();
d9ba4830
PB
9381 tcg_gen_movi_i32(tmp, val);
9382 store_reg(s, 14, tmp);
9ee6e8bb
PB
9383 /* Sign-extend the 24-bit offset */
9384 offset = (((int32_t)insn) << 8) >> 8;
9385 /* offset * 4 + bit24 * 2 + (thumb bit) */
9386 val += (offset << 2) | ((insn >> 23) & 2) | 1;
9387 /* pipeline offset */
9388 val += 4;
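/* Illustrative layout of val at this point: bit 0 is set so that
 * gen_bx_im() switches to Thumb state, bit 1 carries the H bit's
 * extra halfword, and the rest is PC+8 (pipeline) plus the
 * sign-extended word offset. */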
be5e7a76 9389 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 9390 gen_bx_im(s, val);
9ee6e8bb
PB
9391 return;
9392 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 9393 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 9394 /* iWMMXt register transfer. */
c0f4af17 9395 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 9396 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 9397 return;
c0f4af17
PM
9398 }
9399 }
9ee6e8bb 9400 }
8b7209fa
RH
9401 } else if ((insn & 0x0e000a00) == 0x0c000800
9402 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9403 if (disas_neon_insn_3same_ext(s, insn)) {
9404 goto illegal_op;
9405 }
9406 return;
638808ff
RH
9407 } else if ((insn & 0x0f000a00) == 0x0e000800
9408 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9409 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
9410 goto illegal_op;
9411 }
9412 return;
9ee6e8bb
PB
9413 } else if ((insn & 0x0fe00000) == 0x0c400000) {
9414 /* Coprocessor double register transfer. */
be5e7a76 9415 ARCH(5TE);
9ee6e8bb
PB
9416 } else if ((insn & 0x0f000010) == 0x0e000010) {
9417 /* Additional coprocessor register transfer. */
7997d92f 9418 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
9419 uint32_t mask;
9420 uint32_t val;
9421 /* cps (privileged) */
9422 if (IS_USER(s))
9423 return;
9424 mask = val = 0;
9425 if (insn & (1 << 19)) {
9426 if (insn & (1 << 8))
9427 mask |= CPSR_A;
9428 if (insn & (1 << 7))
9429 mask |= CPSR_I;
9430 if (insn & (1 << 6))
9431 mask |= CPSR_F;
9432 if (insn & (1 << 18))
9433 val |= mask;
9434 }
7997d92f 9435 if (insn & (1 << 17)) {
9ee6e8bb
PB
9436 mask |= CPSR_M;
9437 val |= (insn & 0x1f);
9438 }
9439 if (mask) {
2fbac54b 9440 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
9441 }
9442 return;
9443 }
9444 goto illegal_op;
9445 }
9446 if (cond != 0xe) {
9447 /* if not always executed, we generate a conditional jump to
9448 the next instruction */
c2d9644e 9449 arm_skip_unless(s, cond);
9ee6e8bb
PB
9450 }
9451 if ((insn & 0x0f900000) == 0x03000000) {
9452 if ((insn & (1 << 21)) == 0) {
9453 ARCH(6T2);
9454 rd = (insn >> 12) & 0xf;
9455 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9456 if ((insn & (1 << 22)) == 0) {
9457 /* MOVW */
7d1b0095 9458 tmp = tcg_temp_new_i32();
5e3f878a 9459 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
9460 } else {
9461 /* MOVT */
5e3f878a 9462 tmp = load_reg(s, rd);
86831435 9463 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9464 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 9465 }
5e3f878a 9466 store_reg(s, rd, tmp);
9ee6e8bb
PB
9467 } else {
9468 if (((insn >> 12) & 0xf) != 0xf)
9469 goto illegal_op;
9470 if (((insn >> 16) & 0xf) == 0) {
9471 gen_nop_hint(s, insn & 0xff);
9472 } else {
9473 /* CPSR = immediate */
9474 val = insn & 0xff;
9475 shift = ((insn >> 8) & 0xf) * 2;
9476 if (shift)
9477 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 9478 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
9479 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
9480 i, val)) {
9ee6e8bb 9481 goto illegal_op;
7dcc1f89 9482 }
9ee6e8bb
PB
9483 }
9484 }
9485 } else if ((insn & 0x0f900000) == 0x01000000
9486 && (insn & 0x00000090) != 0x00000090) {
9487 /* miscellaneous instructions */
9488 op1 = (insn >> 21) & 3;
9489 sh = (insn >> 4) & 0xf;
9490 rm = insn & 0xf;
9491 switch (sh) {
8bfd0550
PM
9492 case 0x0: /* MSR, MRS */
9493 if (insn & (1 << 9)) {
9494 /* MSR (banked) and MRS (banked) */
9495 int sysm = extract32(insn, 16, 4) |
9496 (extract32(insn, 8, 1) << 4);
9497 int r = extract32(insn, 22, 1);
9498
9499 if (op1 & 1) {
9500 /* MSR (banked) */
9501 gen_msr_banked(s, r, sysm, rm);
9502 } else {
9503 /* MRS (banked) */
9504 int rd = extract32(insn, 12, 4);
9505
9506 gen_mrs_banked(s, r, sysm, rd);
9507 }
9508 break;
9509 }
9510
9511 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
9512 if (op1 & 1) {
9513 /* PSR = reg */
2fbac54b 9514 tmp = load_reg(s, rm);
9ee6e8bb 9515 i = ((op1 & 2) != 0);
7dcc1f89 9516 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
9517 goto illegal_op;
9518 } else {
9519 /* reg = PSR */
9520 rd = (insn >> 12) & 0xf;
9521 if (op1 & 2) {
9522 if (IS_USER(s))
9523 goto illegal_op;
d9ba4830 9524 tmp = load_cpu_field(spsr);
9ee6e8bb 9525 } else {
7d1b0095 9526 tmp = tcg_temp_new_i32();
9ef39277 9527 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9528 }
d9ba4830 9529 store_reg(s, rd, tmp);
9ee6e8bb
PB
9530 }
9531 break;
9532 case 0x1:
9533 if (op1 == 1) {
9534 /* branch/exchange thumb (bx). */
be5e7a76 9535 ARCH(4T);
d9ba4830
PB
9536 tmp = load_reg(s, rm);
9537 gen_bx(s, tmp);
9ee6e8bb
PB
9538 } else if (op1 == 3) {
9539 /* clz */
be5e7a76 9540 ARCH(5);
9ee6e8bb 9541 rd = (insn >> 12) & 0xf;
1497c961 9542 tmp = load_reg(s, rm);
7539a012 9543 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 9544 store_reg(s, rd, tmp);
9ee6e8bb
PB
9545 } else {
9546 goto illegal_op;
9547 }
9548 break;
9549 case 0x2:
9550 if (op1 == 1) {
9551 ARCH(5J); /* bxj */
9552 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9553 tmp = load_reg(s, rm);
9554 gen_bx(s, tmp);
9ee6e8bb
PB
9555 } else {
9556 goto illegal_op;
9557 }
9558 break;
9559 case 0x3:
9560 if (op1 != 1)
9561 goto illegal_op;
9562
be5e7a76 9563 ARCH(5);
9ee6e8bb 9564 /* branch link/exchange thumb (blx) */
d9ba4830 9565 tmp = load_reg(s, rm);
7d1b0095 9566 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
9567 tcg_gen_movi_i32(tmp2, s->pc);
9568 store_reg(s, 14, tmp2);
9569 gen_bx(s, tmp);
9ee6e8bb 9570 break;
eb0ecd5a
WN
9571 case 0x4:
9572 {
9573 /* crc32/crc32c */
9574 uint32_t c = extract32(insn, 8, 4);
9575
9576 /* Check that this CPU supports ARMv8 CRC instructions.
9577 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
9578 * Bits 8, 10 and 11 should be zero.
9579 */
962fcbf2 9580 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
eb0ecd5a
WN
9581 goto illegal_op;
9582 }
9583
9584 rn = extract32(insn, 16, 4);
9585 rd = extract32(insn, 12, 4);
9586
9587 tmp = load_reg(s, rn);
9588 tmp2 = load_reg(s, rm);
aa633469
PM
9589 if (op1 == 0) {
9590 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9591 } else if (op1 == 1) {
9592 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9593 }
eb0ecd5a
WN
9594 tmp3 = tcg_const_i32(1 << op1);
9595 if (c & 0x2) {
9596 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9597 } else {
9598 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9599 }
9600 tcg_temp_free_i32(tmp2);
9601 tcg_temp_free_i32(tmp3);
9602 store_reg(s, rd, tmp);
9603 break;
9604 }
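/* Editorial note: op1 selects the chunk width (1 << op1 bytes, i.e.
 * CRC32B/H/W) and bit 9 of the insn (c & 2) selects the Castagnoli
 * polynomial (CRC32C) over the IEEE 802.3 one. */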
9ee6e8bb 9605 case 0x5: /* saturating add/subtract */
be5e7a76 9606 ARCH(5TE);
9ee6e8bb
PB
9607 rd = (insn >> 12) & 0xf;
9608 rn = (insn >> 16) & 0xf;
b40d0353 9609 tmp = load_reg(s, rm);
5e3f878a 9610 tmp2 = load_reg(s, rn);
9ee6e8bb 9611 if (op1 & 2)
9ef39277 9612 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 9613 if (op1 & 1)
9ef39277 9614 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9615 else
9ef39277 9616 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9617 tcg_temp_free_i32(tmp2);
5e3f878a 9618 store_reg(s, rd, tmp);
9ee6e8bb 9619 break;
55c544ed
PM
9620 case 0x6: /* ERET */
9621 if (op1 != 3) {
9622 goto illegal_op;
9623 }
9624 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
9625 goto illegal_op;
9626 }
9627 if ((insn & 0x000fff0f) != 0x0000000e) {
9628 /* UNPREDICTABLE; we choose to UNDEF */
9629 goto illegal_op;
9630 }
9631
9632 if (s->current_el == 2) {
9633 tmp = load_cpu_field(elr_el[2]);
9634 } else {
9635 tmp = load_reg(s, 14);
9636 }
9637 gen_exception_return(s, tmp);
9638 break;
49e14940 9639 case 7:
d4a2dc67
PM
9640 {
9641 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 9642 switch (op1) {
19a6e31c
PM
9643 case 0:
9644 /* HLT */
9645 gen_hlt(s, imm16);
9646 break;
37e6456e
PM
9647 case 1:
9648 /* bkpt */
9649 ARCH(5);
c900a2e6 9650 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
37e6456e
PM
9651 break;
9652 case 2:
9653 /* Hypervisor call (v7) */
9654 ARCH(7);
9655 if (IS_USER(s)) {
9656 goto illegal_op;
9657 }
9658 gen_hvc(s, imm16);
9659 break;
9660 case 3:
9661 /* Secure monitor call (v6+) */
9662 ARCH(6K);
9663 if (IS_USER(s)) {
9664 goto illegal_op;
9665 }
9666 gen_smc(s);
9667 break;
9668 default:
19a6e31c 9669 g_assert_not_reached();
49e14940 9670 }
9ee6e8bb 9671 break;
d4a2dc67 9672 }
9ee6e8bb
PB
9673 case 0x8: /* signed multiply */
9674 case 0xa:
9675 case 0xc:
9676 case 0xe:
be5e7a76 9677 ARCH(5TE);
9ee6e8bb
PB
9678 rs = (insn >> 8) & 0xf;
9679 rn = (insn >> 12) & 0xf;
9680 rd = (insn >> 16) & 0xf;
9681 if (op1 == 1) {
9682 /* (32 * 16) >> 16 */
5e3f878a
PB
9683 tmp = load_reg(s, rm);
9684 tmp2 = load_reg(s, rs);
9ee6e8bb 9685 if (sh & 4)
5e3f878a 9686 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9687 else
5e3f878a 9688 gen_sxth(tmp2);
a7812ae4
PB
9689 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9690 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9691 tmp = tcg_temp_new_i32();
ecc7b3aa 9692 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 9693 tcg_temp_free_i64(tmp64);
9ee6e8bb 9694 if ((sh & 2) == 0) {
5e3f878a 9695 tmp2 = load_reg(s, rn);
9ef39277 9696 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9697 tcg_temp_free_i32(tmp2);
9ee6e8bb 9698 }
5e3f878a 9699 store_reg(s, rd, tmp);
9ee6e8bb
PB
9700 } else {
9701 /* 16 * 16 */
5e3f878a
PB
9702 tmp = load_reg(s, rm);
9703 tmp2 = load_reg(s, rs);
9704 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 9705 tcg_temp_free_i32(tmp2);
9ee6e8bb 9706 if (op1 == 2) {
a7812ae4
PB
9707 tmp64 = tcg_temp_new_i64();
9708 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9709 tcg_temp_free_i32(tmp);
a7812ae4
PB
9710 gen_addq(s, tmp64, rn, rd);
9711 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 9712 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9713 } else {
9714 if (op1 == 0) {
5e3f878a 9715 tmp2 = load_reg(s, rn);
9ef39277 9716 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9717 tcg_temp_free_i32(tmp2);
9ee6e8bb 9718 }
5e3f878a 9719 store_reg(s, rd, tmp);
9ee6e8bb
PB
9720 }
9721 }
9722 break;
9723 default:
9724 goto illegal_op;
9725 }
9726 } else if (((insn & 0x0e000000) == 0 &&
9727 (insn & 0x00000090) != 0x90) ||
9728 ((insn & 0x0e000000) == (1 << 25))) {
9729 int set_cc, logic_cc, shiftop;
9730
9731 op1 = (insn >> 21) & 0xf;
9732 set_cc = (insn >> 20) & 1;
9733 logic_cc = table_logic_cc[op1] & set_cc;
9734
9735 /* data processing instruction */
9736 if (insn & (1 << 25)) {
9737 /* immediate operand */
9738 val = insn & 0xff;
9739 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 9740 if (shift) {
9ee6e8bb 9741 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 9742 }
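/* Illustrative example: imm12 = 0x4ff encodes 0xff rotated right
 * by 2 * 4 = 8 bits, i.e. the constant 0xff000000. */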
7d1b0095 9743 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
9744 tcg_gen_movi_i32(tmp2, val);
9745 if (logic_cc && shift) {
9746 gen_set_CF_bit31(tmp2);
9747 }
9ee6e8bb
PB
9748 } else {
9749 /* register */
9750 rm = (insn) & 0xf;
e9bb4aa9 9751 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9752 shiftop = (insn >> 5) & 3;
9753 if (!(insn & (1 << 4))) {
9754 shift = (insn >> 7) & 0x1f;
e9bb4aa9 9755 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
9756 } else {
9757 rs = (insn >> 8) & 0xf;
8984bd2e 9758 tmp = load_reg(s, rs);
e9bb4aa9 9759 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
9760 }
9761 }
9762 if (op1 != 0x0f && op1 != 0x0d) {
9763 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
9764 tmp = load_reg(s, rn);
9765 } else {
f764718d 9766 tmp = NULL;
9ee6e8bb
PB
9767 }
9768 rd = (insn >> 12) & 0xf;
9769 switch(op1) {
9770 case 0x00:
e9bb4aa9
JR
9771 tcg_gen_and_i32(tmp, tmp, tmp2);
9772 if (logic_cc) {
9773 gen_logic_CC(tmp);
9774 }
7dcc1f89 9775 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9776 break;
9777 case 0x01:
e9bb4aa9
JR
9778 tcg_gen_xor_i32(tmp, tmp, tmp2);
9779 if (logic_cc) {
9780 gen_logic_CC(tmp);
9781 }
7dcc1f89 9782 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9783 break;
9784 case 0x02:
9785 if (set_cc && rd == 15) {
9786 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 9787 if (IS_USER(s)) {
9ee6e8bb 9788 goto illegal_op;
e9bb4aa9 9789 }
72485ec4 9790 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 9791 gen_exception_return(s, tmp);
9ee6e8bb 9792 } else {
e9bb4aa9 9793 if (set_cc) {
72485ec4 9794 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9795 } else {
9796 tcg_gen_sub_i32(tmp, tmp, tmp2);
9797 }
7dcc1f89 9798 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9799 }
9800 break;
9801 case 0x03:
e9bb4aa9 9802 if (set_cc) {
72485ec4 9803 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9804 } else {
9805 tcg_gen_sub_i32(tmp, tmp2, tmp);
9806 }
7dcc1f89 9807 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9808 break;
9809 case 0x04:
e9bb4aa9 9810 if (set_cc) {
72485ec4 9811 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9812 } else {
9813 tcg_gen_add_i32(tmp, tmp, tmp2);
9814 }
7dcc1f89 9815 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9816 break;
9817 case 0x05:
e9bb4aa9 9818 if (set_cc) {
49b4c31e 9819 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9820 } else {
9821 gen_add_carry(tmp, tmp, tmp2);
9822 }
7dcc1f89 9823 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9824 break;
9825 case 0x06:
e9bb4aa9 9826 if (set_cc) {
2de68a49 9827 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9828 } else {
9829 gen_sub_carry(tmp, tmp, tmp2);
9830 }
7dcc1f89 9831 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9832 break;
9833 case 0x07:
e9bb4aa9 9834 if (set_cc) {
2de68a49 9835 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9836 } else {
9837 gen_sub_carry(tmp, tmp2, tmp);
9838 }
7dcc1f89 9839 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9840 break;
9841 case 0x08:
9842 if (set_cc) {
e9bb4aa9
JR
9843 tcg_gen_and_i32(tmp, tmp, tmp2);
9844 gen_logic_CC(tmp);
9ee6e8bb 9845 }
7d1b0095 9846 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9847 break;
9848 case 0x09:
9849 if (set_cc) {
e9bb4aa9
JR
9850 tcg_gen_xor_i32(tmp, tmp, tmp2);
9851 gen_logic_CC(tmp);
9ee6e8bb 9852 }
7d1b0095 9853 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9854 break;
9855 case 0x0a:
9856 if (set_cc) {
72485ec4 9857 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 9858 }
7d1b0095 9859 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9860 break;
9861 case 0x0b:
9862 if (set_cc) {
72485ec4 9863 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9864 }
7d1b0095 9865 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9866 break;
9867 case 0x0c:
e9bb4aa9
JR
9868 tcg_gen_or_i32(tmp, tmp, tmp2);
9869 if (logic_cc) {
9870 gen_logic_CC(tmp);
9871 }
7dcc1f89 9872 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9873 break;
9874 case 0x0d:
9875 if (logic_cc && rd == 15) {
9876 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 9877 if (IS_USER(s)) {
9ee6e8bb 9878 goto illegal_op;
e9bb4aa9
JR
9879 }
9880 gen_exception_return(s, tmp2);
9ee6e8bb 9881 } else {
e9bb4aa9
JR
9882 if (logic_cc) {
9883 gen_logic_CC(tmp2);
9884 }
7dcc1f89 9885 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9886 }
9887 break;
9888 case 0x0e:
f669df27 9889 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
9890 if (logic_cc) {
9891 gen_logic_CC(tmp);
9892 }
7dcc1f89 9893 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9894 break;
9895 default:
9896 case 0x0f:
e9bb4aa9
JR
9897 tcg_gen_not_i32(tmp2, tmp2);
9898 if (logic_cc) {
9899 gen_logic_CC(tmp2);
9900 }
7dcc1f89 9901 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9902 break;
9903 }
e9bb4aa9 9904 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 9905 tcg_temp_free_i32(tmp2);
e9bb4aa9 9906 }
9ee6e8bb
PB
9907 } else {
9908 /* other instructions */
9909 op1 = (insn >> 24) & 0xf;
9910 switch(op1) {
9911 case 0x0:
9912 case 0x1:
9913 /* multiplies, extra load/stores */
9914 sh = (insn >> 5) & 3;
9915 if (sh == 0) {
9916 if (op1 == 0x0) {
9917 rd = (insn >> 16) & 0xf;
9918 rn = (insn >> 12) & 0xf;
9919 rs = (insn >> 8) & 0xf;
9920 rm = (insn) & 0xf;
9921 op1 = (insn >> 20) & 0xf;
9922 switch (op1) {
9923 case 0: case 1: case 2: case 3: case 6:
9924 /* 32 bit mul */
5e3f878a
PB
9925 tmp = load_reg(s, rs);
9926 tmp2 = load_reg(s, rm);
9927 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9928 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9929 if (insn & (1 << 22)) {
9930 /* Subtract (mls) */
9931 ARCH(6T2);
5e3f878a
PB
9932 tmp2 = load_reg(s, rn);
9933 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 9934 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9935 } else if (insn & (1 << 21)) {
9936 /* Add */
5e3f878a
PB
9937 tmp2 = load_reg(s, rn);
9938 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9939 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9940 }
9941 if (insn & (1 << 20))
5e3f878a
PB
9942 gen_logic_CC(tmp);
9943 store_reg(s, rd, tmp);
9ee6e8bb 9944 break;
8aac08b1
AJ
9945 case 4:
9946 /* 64 bit mul double accumulate (UMAAL) */
9947 ARCH(6);
9948 tmp = load_reg(s, rs);
9949 tmp2 = load_reg(s, rm);
9950 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9951 gen_addq_lo(s, tmp64, rn);
9952 gen_addq_lo(s, tmp64, rd);
9953 gen_storeq_reg(s, rn, rd, tmp64);
9954 tcg_temp_free_i64(tmp64);
9955 break;
9956 case 8: case 9: case 10: case 11:
9957 case 12: case 13: case 14: case 15:
9958 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
9959 tmp = load_reg(s, rs);
9960 tmp2 = load_reg(s, rm);
8aac08b1 9961 if (insn & (1 << 22)) {
c9f10124 9962 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 9963 } else {
c9f10124 9964 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
9965 }
9966 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
9967 TCGv_i32 al = load_reg(s, rn);
9968 TCGv_i32 ah = load_reg(s, rd);
c9f10124 9969 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
9970 tcg_temp_free_i32(al);
9971 tcg_temp_free_i32(ah);
9ee6e8bb 9972 }
8aac08b1 9973 if (insn & (1 << 20)) {
c9f10124 9974 gen_logicq_cc(tmp, tmp2);
8aac08b1 9975 }
c9f10124
RH
9976 store_reg(s, rn, tmp);
9977 store_reg(s, rd, tmp2);
9ee6e8bb 9978 break;
8aac08b1
AJ
9979 default:
9980 goto illegal_op;
9ee6e8bb
PB
9981 }
9982 } else {
9983 rn = (insn >> 16) & 0xf;
9984 rd = (insn >> 12) & 0xf;
9985 if (insn & (1 << 23)) {
9986 /* load/store exclusive */
96c55295
PM
9987 bool is_ld = extract32(insn, 20, 1);
9988 bool is_lasr = !extract32(insn, 8, 1);
2359bf80 9989 int op2 = (insn >> 8) & 3;
86753403 9990 op1 = (insn >> 21) & 0x3;
2359bf80
MR
9991
9992 switch (op2) {
9993 case 0: /* lda/stl */
9994 if (op1 == 1) {
9995 goto illegal_op;
9996 }
9997 ARCH(8);
9998 break;
9999 case 1: /* reserved */
10000 goto illegal_op;
10001 case 2: /* ldaex/stlex */
10002 ARCH(8);
10003 break;
10004 case 3: /* ldrex/strex */
10005 if (op1) {
10006 ARCH(6K);
10007 } else {
10008 ARCH(6);
10009 }
10010 break;
10011 }
10012
3174f8e9 10013 addr = tcg_temp_local_new_i32();
98a46317 10014 load_reg_var(s, addr, rn);
2359bf80 10015
96c55295
PM
10016 if (is_lasr && !is_ld) {
10017 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
10018 }
10019
2359bf80 10020 if (op2 == 0) {
96c55295 10021 if (is_ld) {
2359bf80
MR
10022 tmp = tcg_temp_new_i32();
10023 switch (op1) {
10024 case 0: /* lda */
9bb6558a
PM
10025 gen_aa32_ld32u_iss(s, tmp, addr,
10026 get_mem_index(s),
10027 rd | ISSIsAcqRel);
2359bf80
MR
10028 break;
10029 case 2: /* ldab */
9bb6558a
PM
10030 gen_aa32_ld8u_iss(s, tmp, addr,
10031 get_mem_index(s),
10032 rd | ISSIsAcqRel);
2359bf80
MR
10033 break;
10034 case 3: /* ldah */
9bb6558a
PM
10035 gen_aa32_ld16u_iss(s, tmp, addr,
10036 get_mem_index(s),
10037 rd | ISSIsAcqRel);
2359bf80
MR
10038 break;
10039 default:
10040 abort();
10041 }
10042 store_reg(s, rd, tmp);
10043 } else {
10044 rm = insn & 0xf;
10045 tmp = load_reg(s, rm);
10046 switch (op1) {
10047 case 0: /* stl */
9bb6558a
PM
10048 gen_aa32_st32_iss(s, tmp, addr,
10049 get_mem_index(s),
10050 rm | ISSIsAcqRel);
2359bf80
MR
10051 break;
10052 case 2: /* stlb */
9bb6558a
PM
10053 gen_aa32_st8_iss(s, tmp, addr,
10054 get_mem_index(s),
10055 rm | ISSIsAcqRel);
2359bf80
MR
10056 break;
10057 case 3: /* stlh */
9bb6558a
PM
10058 gen_aa32_st16_iss(s, tmp, addr,
10059 get_mem_index(s),
10060 rm | ISSIsAcqRel);
2359bf80
MR
10061 break;
10062 default:
10063 abort();
10064 }
10065 tcg_temp_free_i32(tmp);
10066 }
96c55295 10067 } else if (is_ld) {
86753403
PB
10068 switch (op1) {
10069 case 0: /* ldrex */
426f5abc 10070 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
10071 break;
10072 case 1: /* ldrexd */
426f5abc 10073 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
10074 break;
10075 case 2: /* ldrexb */
426f5abc 10076 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
10077 break;
10078 case 3: /* ldrexh */
426f5abc 10079 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
10080 break;
10081 default:
10082 abort();
10083 }
9ee6e8bb
PB
10084 } else {
10085 rm = insn & 0xf;
86753403
PB
10086 switch (op1) {
10087 case 0: /* strex */
426f5abc 10088 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
10089 break;
10090 case 1: /* strexd */
502e64fe 10091 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
10092 break;
10093 case 2: /* strexb */
426f5abc 10094 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
10095 break;
10096 case 3: /* strexh */
426f5abc 10097 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
10098 break;
10099 default:
10100 abort();
10101 }
9ee6e8bb 10102 }
39d5492a 10103 tcg_temp_free_i32(addr);
96c55295
PM
10104
10105 if (is_lasr && is_ld) {
10106 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
10107 }
c4869ca6
OS
10108 } else if ((insn & 0x00300f00) == 0) {
10109 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
10110 * - SWP, SWPB
10111 */
10112
cf12bce0
EC
10113 TCGv taddr;
10114 TCGMemOp opc = s->be_data;
10115
9ee6e8bb
PB
10116 rm = (insn) & 0xf;
10117
9ee6e8bb 10118 if (insn & (1 << 22)) {
cf12bce0 10119 opc |= MO_UB;
9ee6e8bb 10120 } else {
cf12bce0 10121 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 10122 }
cf12bce0
EC
10123
10124 addr = load_reg(s, rn);
10125 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 10126 tcg_temp_free_i32(addr);
cf12bce0
EC
10127
10128 tmp = load_reg(s, rm);
10129 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
10130 get_mem_index(s), opc);
10131 tcg_temp_free(taddr);
10132 store_reg(s, rd, tmp);
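/* Editorial note: SWP{B} is a single atomic exchange,
 * Rd = xchg(mem[Rn], Rm), so one tcg_gen_atomic_xchg_i32()
 * implements both the load and the store halves. */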
c4869ca6
OS
10133 } else {
10134 goto illegal_op;
9ee6e8bb
PB
10135 }
10136 }
10137 } else {
10138 int address_offset;
3960c336 10139 bool load = insn & (1 << 20);
63f26fcf
PM
10140 bool wbit = insn & (1 << 21);
10141 bool pbit = insn & (1 << 24);
3960c336 10142 bool doubleword = false;
9bb6558a
PM
10143 ISSInfo issinfo;
10144
9ee6e8bb
PB
10145 /* Misc load/store */
10146 rn = (insn >> 16) & 0xf;
10147 rd = (insn >> 12) & 0xf;
3960c336 10148
9bb6558a
PM
10149 /* ISS not valid if writeback */
10150 issinfo = (pbit && !wbit) ? rd : ISSInvalid;
10151
3960c336
PM
10152 if (!load && (sh & 2)) {
10153 /* doubleword */
10154 ARCH(5TE);
10155 if (rd & 1) {
10156 /* UNPREDICTABLE; we choose to UNDEF */
10157 goto illegal_op;
10158 }
10159 load = (sh & 1) == 0;
10160 doubleword = true;
10161 }
10162
b0109805 10163 addr = load_reg(s, rn);
63f26fcf 10164 if (pbit) {
b0109805 10165 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 10166 }
9ee6e8bb 10167 address_offset = 0;
3960c336
PM
10168
10169 if (doubleword) {
10170 if (!load) {
9ee6e8bb 10171 /* store */
b0109805 10172 tmp = load_reg(s, rd);
12dcc321 10173 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10174 tcg_temp_free_i32(tmp);
b0109805
PB
10175 tcg_gen_addi_i32(addr, addr, 4);
10176 tmp = load_reg(s, rd + 1);
12dcc321 10177 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10178 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10179 } else {
10180 /* load */
5a839c0d 10181 tmp = tcg_temp_new_i32();
12dcc321 10182 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
10183 store_reg(s, rd, tmp);
10184 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 10185 tmp = tcg_temp_new_i32();
12dcc321 10186 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10187 rd++;
9ee6e8bb
PB
10188 }
10189 address_offset = -4;
3960c336
PM
10190 } else if (load) {
10191 /* load */
10192 tmp = tcg_temp_new_i32();
10193 switch (sh) {
10194 case 1:
9bb6558a
PM
10195 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10196 issinfo);
3960c336
PM
10197 break;
10198 case 2:
9bb6558a
PM
10199 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
10200 issinfo);
3960c336
PM
10201 break;
10202 default:
10203 case 3:
9bb6558a
PM
10204 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
10205 issinfo);
3960c336
PM
10206 break;
10207 }
9ee6e8bb
PB
10208 } else {
10209 /* store */
b0109805 10210 tmp = load_reg(s, rd);
9bb6558a 10211 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 10212 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10213 }
10214 /* Perform base writeback before the loaded value to
10215 ensure correct behavior with overlapping index registers.
b6af0975 10216 ldrd with base writeback is undefined if the
9ee6e8bb 10217 destination and index registers overlap. */
63f26fcf 10218 if (!pbit) {
b0109805
PB
10219 gen_add_datah_offset(s, insn, address_offset, addr);
10220 store_reg(s, rn, addr);
63f26fcf 10221 } else if (wbit) {
9ee6e8bb 10222 if (address_offset)
b0109805
PB
10223 tcg_gen_addi_i32(addr, addr, address_offset);
10224 store_reg(s, rn, addr);
10225 } else {
7d1b0095 10226 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10227 }
10228 if (load) {
10229 /* Complete the load. */
b0109805 10230 store_reg(s, rd, tmp);
9ee6e8bb
PB
10231 }
10232 }
10233 break;
10234 case 0x4:
10235 case 0x5:
10236 goto do_ldst;
10237 case 0x6:
10238 case 0x7:
10239 if (insn & (1 << 4)) {
10240 ARCH(6);
10241 /* Armv6 Media instructions. */
10242 rm = insn & 0xf;
10243 rn = (insn >> 16) & 0xf;
2c0262af 10244 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
10245 rs = (insn >> 8) & 0xf;
10246 switch ((insn >> 23) & 3) {
10247 case 0: /* Parallel add/subtract. */
10248 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
10249 tmp = load_reg(s, rn);
10250 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10251 sh = (insn >> 5) & 7;
10252 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
10253 goto illegal_op;
6ddbc6e4 10254 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 10255 tcg_temp_free_i32(tmp2);
6ddbc6e4 10256 store_reg(s, rd, tmp);
9ee6e8bb
PB
10257 break;
10258 case 1:
10259 if ((insn & 0x00700020) == 0) {
6c95676b 10260 /* Halfword pack. */
3670669c
PB
10261 tmp = load_reg(s, rn);
10262 tmp2 = load_reg(s, rm);
9ee6e8bb 10263 shift = (insn >> 7) & 0x1f;
3670669c
PB
10264 if (insn & (1 << 6)) {
10265 /* pkhtb */
22478e79
AZ
10266 if (shift == 0)
10267 shift = 31;
10268 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 10269 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 10270 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
10271 } else {
10272 /* pkhbt */
22478e79
AZ
10273 if (shift)
10274 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 10275 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
10276 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10277 }
10278 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10279 tcg_temp_free_i32(tmp2);
3670669c 10280 store_reg(s, rd, tmp);
9ee6e8bb
PB
10281 } else if ((insn & 0x00200020) == 0x00200000) {
10282 /* [us]sat */
6ddbc6e4 10283 tmp = load_reg(s, rm);
9ee6e8bb
PB
10284 shift = (insn >> 7) & 0x1f;
10285 if (insn & (1 << 6)) {
10286 if (shift == 0)
10287 shift = 31;
6ddbc6e4 10288 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10289 } else {
6ddbc6e4 10290 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
10291 }
10292 sh = (insn >> 16) & 0x1f;
40d3c433
CL
10293 tmp2 = tcg_const_i32(sh);
10294 if (insn & (1 << 22))
9ef39277 10295 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 10296 else
9ef39277 10297 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 10298 tcg_temp_free_i32(tmp2);
6ddbc6e4 10299 store_reg(s, rd, tmp);
9ee6e8bb
PB
10300 } else if ((insn & 0x00300fe0) == 0x00200f20) {
10301 /* [us]sat16 */
6ddbc6e4 10302 tmp = load_reg(s, rm);
9ee6e8bb 10303 sh = (insn >> 16) & 0x1f;
40d3c433
CL
10304 tmp2 = tcg_const_i32(sh);
10305 if (insn & (1 << 22))
9ef39277 10306 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 10307 else
9ef39277 10308 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 10309 tcg_temp_free_i32(tmp2);
6ddbc6e4 10310 store_reg(s, rd, tmp);
9ee6e8bb
PB
10311 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
10312 /* Select bytes. */
6ddbc6e4
PB
10313 tmp = load_reg(s, rn);
10314 tmp2 = load_reg(s, rm);
7d1b0095 10315 tmp3 = tcg_temp_new_i32();
0ecb72a5 10316 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 10317 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10318 tcg_temp_free_i32(tmp3);
10319 tcg_temp_free_i32(tmp2);
6ddbc6e4 10320 store_reg(s, rd, tmp);
9ee6e8bb 10321 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 10322 tmp = load_reg(s, rm);
9ee6e8bb 10323 shift = (insn >> 10) & 3;
1301f322 10324 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10325 rotate; a shift is sufficient. */
10326 if (shift != 0)
f669df27 10327 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10328 op1 = (insn >> 20) & 7;
10329 switch (op1) {
5e3f878a
PB
10330 case 0: gen_sxtb16(tmp); break;
10331 case 2: gen_sxtb(tmp); break;
10332 case 3: gen_sxth(tmp); break;
10333 case 4: gen_uxtb16(tmp); break;
10334 case 6: gen_uxtb(tmp); break;
10335 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
10336 default: goto illegal_op;
10337 }
10338 if (rn != 15) {
5e3f878a 10339 tmp2 = load_reg(s, rn);
9ee6e8bb 10340 if ((op1 & 3) == 0) {
5e3f878a 10341 gen_add16(tmp, tmp2);
9ee6e8bb 10342 } else {
5e3f878a 10343 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10344 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10345 }
10346 }
6c95676b 10347 store_reg(s, rd, tmp);
9ee6e8bb
PB
10348 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
10349 /* rev */
b0109805 10350 tmp = load_reg(s, rm);
9ee6e8bb
PB
10351 if (insn & (1 << 22)) {
10352 if (insn & (1 << 7)) {
b0109805 10353 gen_revsh(tmp);
9ee6e8bb
PB
10354 } else {
10355 ARCH(6T2);
b0109805 10356 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10357 }
10358 } else {
10359 if (insn & (1 << 7))
b0109805 10360 gen_rev16(tmp);
9ee6e8bb 10361 else
66896cb8 10362 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 10363 }
b0109805 10364 store_reg(s, rd, tmp);
9ee6e8bb
PB
10365 } else {
10366 goto illegal_op;
10367 }
10368 break;
10369 case 2: /* Multiplies (Type 3). */
41e9564d
PM
10370 switch ((insn >> 20) & 0x7) {
10371 case 5:
10372 if (((insn >> 6) ^ (insn >> 7)) & 1) {
10373 /* op2 not 00x or 11x : UNDEF */
10374 goto illegal_op;
10375 }
838fa72d
AJ
10376 /* Signed multiply most significant [accumulate].
10377 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
10378 tmp = load_reg(s, rm);
10379 tmp2 = load_reg(s, rs);
a7812ae4 10380 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 10381
955a7dd5 10382 if (rd != 15) {
838fa72d 10383 tmp = load_reg(s, rd);
9ee6e8bb 10384 if (insn & (1 << 6)) {
838fa72d 10385 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 10386 } else {
838fa72d 10387 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
10388 }
10389 }
838fa72d
AJ
10390 if (insn & (1 << 5)) {
10391 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10392 }
10393 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10394 tmp = tcg_temp_new_i32();
ecc7b3aa 10395 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10396 tcg_temp_free_i64(tmp64);
955a7dd5 10397 store_reg(s, rn, tmp);
41e9564d
PM
10398 break;
10399 case 0:
10400 case 4:
10401 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
10402 if (insn & (1 << 7)) {
10403 goto illegal_op;
10404 }
10405 tmp = load_reg(s, rm);
10406 tmp2 = load_reg(s, rs);
9ee6e8bb 10407 if (insn & (1 << 5))
5e3f878a
PB
10408 gen_swap_half(tmp2);
10409 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10410 if (insn & (1 << 22)) {
5e3f878a 10411 /* smlald, smlsld */
33bbd75a
PC
10412 TCGv_i64 tmp64_2;
10413
a7812ae4 10414 tmp64 = tcg_temp_new_i64();
33bbd75a 10415 tmp64_2 = tcg_temp_new_i64();
a7812ae4 10416 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 10417 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 10418 tcg_temp_free_i32(tmp);
33bbd75a
PC
10419 tcg_temp_free_i32(tmp2);
10420 if (insn & (1 << 6)) {
10421 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
10422 } else {
10423 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
10424 }
10425 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
10426 gen_addq(s, tmp64, rd, rn);
10427 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 10428 tcg_temp_free_i64(tmp64);
9ee6e8bb 10429 } else {
5e3f878a 10430 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
10431 if (insn & (1 << 6)) {
10432 /* This subtraction cannot overflow. */
10433 tcg_gen_sub_i32(tmp, tmp, tmp2);
10434 } else {
10435 /* This addition cannot overflow 32 bits;
10436 * however it may overflow considered as a
10437 * signed operation, in which case we must set
10438 * the Q flag.
10439 */
10440 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10441 }
10442 tcg_temp_free_i32(tmp2);
22478e79 10443 if (rd != 15)
9ee6e8bb 10444 {
22478e79 10445 tmp2 = load_reg(s, rd);
9ef39277 10446 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10447 tcg_temp_free_i32(tmp2);
9ee6e8bb 10448 }
22478e79 10449 store_reg(s, rn, tmp);
9ee6e8bb 10450 }
41e9564d 10451 break;
b8b8ea05
PM
10452 case 1:
10453 case 3:
10454 /* SDIV, UDIV */
7e0cf8b4 10455 if (!dc_isar_feature(arm_div, s)) {
b8b8ea05
PM
10456 goto illegal_op;
10457 }
10458 if (((insn >> 5) & 7) || (rd != 15)) {
10459 goto illegal_op;
10460 }
10461 tmp = load_reg(s, rm);
10462 tmp2 = load_reg(s, rs);
10463 if (insn & (1 << 21)) {
10464 gen_helper_udiv(tmp, tmp, tmp2);
10465 } else {
10466 gen_helper_sdiv(tmp, tmp, tmp2);
10467 }
10468 tcg_temp_free_i32(tmp2);
10469 store_reg(s, rn, tmp);
10470 break;
41e9564d
PM
10471 default:
10472 goto illegal_op;
9ee6e8bb
PB
10473 }
10474 break;
10475 case 3:
10476 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
10477 switch (op1) {
10478 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
10479 ARCH(6);
10480 tmp = load_reg(s, rm);
10481 tmp2 = load_reg(s, rs);
10482 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10483 tcg_temp_free_i32(tmp2);
ded9d295
AZ
10484 if (rd != 15) {
10485 tmp2 = load_reg(s, rd);
6ddbc6e4 10486 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10487 tcg_temp_free_i32(tmp2);
9ee6e8bb 10488 }
ded9d295 10489 store_reg(s, rn, tmp);
9ee6e8bb
PB
10490 break;
10491 case 0x20: case 0x24: case 0x28: case 0x2c:
10492 /* Bitfield insert/clear. */
10493 ARCH(6T2);
10494 shift = (insn >> 7) & 0x1f;
10495 i = (insn >> 16) & 0x1f;
45140a57
KB
10496 if (i < shift) {
10497 /* UNPREDICTABLE; we choose to UNDEF */
10498 goto illegal_op;
10499 }
9ee6e8bb
PB
10500 i = i + 1 - shift;
10501 if (rm == 15) {
7d1b0095 10502 tmp = tcg_temp_new_i32();
5e3f878a 10503 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 10504 } else {
5e3f878a 10505 tmp = load_reg(s, rm);
9ee6e8bb
PB
10506 }
10507 if (i != 32) {
5e3f878a 10508 tmp2 = load_reg(s, rd);
d593c48e 10509 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 10510 tcg_temp_free_i32(tmp2);
9ee6e8bb 10511 }
5e3f878a 10512 store_reg(s, rd, tmp);
9ee6e8bb
PB
10513 break;
10514 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
10515 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 10516 ARCH(6T2);
5e3f878a 10517 tmp = load_reg(s, rm);
9ee6e8bb
PB
10518 shift = (insn >> 7) & 0x1f;
10519 i = ((insn >> 16) & 0x1f) + 1;
10520 if (shift + i > 32)
10521 goto illegal_op;
10522 if (i < 32) {
10523 if (op1 & 0x20) {
59a71b4c 10524 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 10525 } else {
59a71b4c 10526 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
10527 }
10528 }
5e3f878a 10529 store_reg(s, rd, tmp);
9ee6e8bb
PB
10530 break;
10531 default:
10532 goto illegal_op;
10533 }
10534 break;
10535 }
10536 break;
10537 }
10538 do_ldst:
10539 /* Check for undefined extension instructions
10540 * per the ARM Bible, i.e.:
10541 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
10542 */
10543 sh = (0xf << 20) | (0xf << 4);
10544 if (op1 == 0x7 && ((insn & sh) == sh))
10545 {
10546 goto illegal_op;
10547 }
10548 /* load/store byte/word */
10549 rn = (insn >> 16) & 0xf;
10550 rd = (insn >> 12) & 0xf;
b0109805 10551 tmp2 = load_reg(s, rn);
a99caa48
PM
10552 if ((insn & 0x01200000) == 0x00200000) {
10553 /* ldrt/strt */
579d21cc 10554 i = get_a32_user_mem_index(s);
a99caa48
PM
10555 } else {
10556 i = get_mem_index(s);
10557 }
9ee6e8bb 10558 if (insn & (1 << 24))
b0109805 10559 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
10560 if (insn & (1 << 20)) {
10561 /* load */
5a839c0d 10562 tmp = tcg_temp_new_i32();
9ee6e8bb 10563 if (insn & (1 << 22)) {
9bb6558a 10564 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 10565 } else {
9bb6558a 10566 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 10567 }
9ee6e8bb
PB
10568 } else {
10569 /* store */
b0109805 10570 tmp = load_reg(s, rd);
5a839c0d 10571 if (insn & (1 << 22)) {
9bb6558a 10572 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 10573 } else {
9bb6558a 10574 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
10575 }
10576 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10577 }
10578 if (!(insn & (1 << 24))) {
b0109805
PB
10579 gen_add_data_offset(s, insn, tmp2);
10580 store_reg(s, rn, tmp2);
10581 } else if (insn & (1 << 21)) {
10582 store_reg(s, rn, tmp2);
10583 } else {
7d1b0095 10584 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10585 }
10586 if (insn & (1 << 20)) {
10587 /* Complete the load. */
7dcc1f89 10588 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
10589 }
10590 break;
10591 case 0x08:
10592 case 0x09:
10593 {
da3e53dd
PM
10594 int j, n, loaded_base;
10595 bool exc_return = false;
10596 bool is_load = extract32(insn, 20, 1);
10597 bool user = false;
39d5492a 10598 TCGv_i32 loaded_var;
9ee6e8bb
PB
10599 /* load/store multiple words */
10600 /* XXX: store correct base if write back */
9ee6e8bb 10601 if (insn & (1 << 22)) {
da3e53dd 10602 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
10603 if (IS_USER(s))
10604 goto illegal_op; /* only usable in supervisor mode */
10605
da3e53dd
PM
10606 if (is_load && extract32(insn, 15, 1)) {
10607 exc_return = true;
10608 } else {
10609 user = true;
10610 }
9ee6e8bb
PB
10611 }
10612 rn = (insn >> 16) & 0xf;
b0109805 10613 addr = load_reg(s, rn);
9ee6e8bb
PB
10614
10615 /* compute total size */
10616 loaded_base = 0;
f764718d 10617 loaded_var = NULL;
9ee6e8bb
PB
10618 n = 0;
10619 for(i=0;i<16;i++) {
10620 if (insn & (1 << i))
10621 n++;
10622 }
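/* Editorial note: effectively n = ctpop16(insn & 0xffff), one
 * transfer per set bit in the register list. */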
10623 /* XXX: test invalid n == 0 case ? */
10624 if (insn & (1 << 23)) {
10625 if (insn & (1 << 24)) {
10626 /* pre increment */
b0109805 10627 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10628 } else {
10629 /* post increment */
10630 }
10631 } else {
10632 if (insn & (1 << 24)) {
10633 /* pre decrement */
b0109805 10634 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
10635 } else {
10636 /* post decrement */
10637 if (n != 1)
b0109805 10638 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
10639 }
10640 }
10641 j = 0;
10642 for(i=0;i<16;i++) {
10643 if (insn & (1 << i)) {
da3e53dd 10644 if (is_load) {
9ee6e8bb 10645 /* load */
5a839c0d 10646 tmp = tcg_temp_new_i32();
12dcc321 10647 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 10648 if (user) {
b75263d6 10649 tmp2 = tcg_const_i32(i);
1ce94f81 10650 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 10651 tcg_temp_free_i32(tmp2);
7d1b0095 10652 tcg_temp_free_i32(tmp);
9ee6e8bb 10653 } else if (i == rn) {
b0109805 10654 loaded_var = tmp;
9ee6e8bb 10655 loaded_base = 1;
9d090d17 10656 } else if (i == 15 && exc_return) {
fb0e8e79 10657 store_pc_exc_ret(s, tmp);
9ee6e8bb 10658 } else {
7dcc1f89 10659 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
10660 }
10661 } else {
10662 /* store */
10663 if (i == 15) {
10664 /* special case: r15 = PC + 8 */
10665 val = (long)s->pc + 4;
7d1b0095 10666 tmp = tcg_temp_new_i32();
b0109805 10667 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 10668 } else if (user) {
7d1b0095 10669 tmp = tcg_temp_new_i32();
b75263d6 10670 tmp2 = tcg_const_i32(i);
9ef39277 10671 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 10672 tcg_temp_free_i32(tmp2);
9ee6e8bb 10673 } else {
b0109805 10674 tmp = load_reg(s, i);
9ee6e8bb 10675 }
12dcc321 10676 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10677 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10678 }
10679 j++;
10680 /* no need to add after the last transfer */
10681 if (j != n)
b0109805 10682 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10683 }
10684 }
10685 if (insn & (1 << 21)) {
10686 /* write back */
10687 if (insn & (1 << 23)) {
10688 if (insn & (1 << 24)) {
10689 /* pre increment */
10690 } else {
10691 /* post increment */
b0109805 10692 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10693 }
10694 } else {
10695 if (insn & (1 << 24)) {
10696 /* pre decrement */
10697 if (n != 1)
b0109805 10698 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
10699 } else {
10700 /* post decrement */
b0109805 10701 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
10702 }
10703 }
b0109805
PB
10704 store_reg(s, rn, addr);
10705 } else {
7d1b0095 10706 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10707 }
10708 if (loaded_base) {
b0109805 10709 store_reg(s, rn, loaded_var);
9ee6e8bb 10710 }
da3e53dd 10711 if (exc_return) {
9ee6e8bb 10712 /* Restore CPSR from SPSR. */
d9ba4830 10713 tmp = load_cpu_field(spsr);
e69ad9df
AL
10714 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10715 gen_io_start();
10716 }
235ea1f5 10717 gen_helper_cpsr_write_eret(cpu_env, tmp);
e69ad9df
AL
10718 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10719 gen_io_end();
10720 }
7d1b0095 10721 tcg_temp_free_i32(tmp);
b29fd33d 10722 /* Must exit loop to check un-masked IRQs */
dcba3a8d 10723 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
10724 }
10725 }
10726 break;
10727 case 0xa:
10728 case 0xb:
10729 {
10730 int32_t offset;
10731
10732 /* branch (and link) */
10733 val = (int32_t)s->pc;
10734 if (insn & (1 << 24)) {
7d1b0095 10735 tmp = tcg_temp_new_i32();
5e3f878a
PB
10736 tcg_gen_movi_i32(tmp, val);
10737 store_reg(s, 14, tmp);
9ee6e8bb 10738 }
534df156
PM
10739 offset = sextract32(insn << 2, 0, 26);
10740 val += offset + 4;
9ee6e8bb
PB
10741 gen_jmp(s, val);
10742 }
10743 break;
10744 case 0xc:
10745 case 0xd:
10746 case 0xe:
6a57f3eb
WN
10747 if (((insn >> 8) & 0xe) == 10) {
10748 /* VFP. */
7dcc1f89 10749 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10750 goto illegal_op;
10751 }
7dcc1f89 10752 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 10753 /* Coprocessor. */
9ee6e8bb 10754 goto illegal_op;
6a57f3eb 10755 }
9ee6e8bb
PB
10756 break;
10757 case 0xf:
10758 /* swi */
eaed129d 10759 gen_set_pc_im(s, s->pc);
d4a2dc67 10760 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 10761 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
10762 break;
10763 default:
10764 illegal_op:
73710361
GB
10765 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10766 default_exception_el(s));
9ee6e8bb
PB
10767 break;
10768 }
10769 }
10770}
10771
296e5a0a
PM
10772static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
10773{
10774 /* Return true if this is a 16-bit instruction. We must be precise
10775 * about this (matching the decode). We assume that s->pc still
10776 * points to the first 16 bits of the insn.
10777 */
10778 if ((insn >> 11) < 0x1d) {
10779 /* Definitely a 16-bit instruction */
10780 return true;
10781 }
10782
10783 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10784 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10785 * end up actually treating this as two 16-bit insns, though,
10786 * if it's half of a bl/blx pair that might span a page boundary.
10787 */
14120108
JS
10788 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10789 arm_dc_feature(s, ARM_FEATURE_M)) {
296e5a0a
PM
10790 /* Thumb2 cores (including all M profile ones) always treat
10791 * 32-bit insns as 32-bit.
10792 */
10793 return false;
10794 }
10795
bfe7ad5b 10796 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
296e5a0a
PM
10797 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10798 * is not on the next page; we merge this into a 32-bit
10799 * insn.
10800 */
10801 return false;
10802 }
10803 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10804 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10805 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10806 * -- handle as single 16 bit insn
10807 */
10808 return true;
10809}
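/*
 * Editorial sketch (not part of the decoder): the core of the size
 * test above, ignoring the Thumb-1 BL/BLX and page-boundary special
 * cases. first_hw_is_32bit() is a hypothetical name.
 */
#if 0 /* example only */
static bool first_hw_is_32bit(uint16_t hw)
{
    /* top five bits 0b11101, 0b11110 or 0b11111 mark a 32-bit insn */
    return (hw >> 11) >= 0x1d;
}
#endif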
10810
9ee6e8bb
PB
10811/* Return true if this is a Thumb-2 logical op. */
10812static int
10813thumb2_logic_op(int op)
10814{
10815 return (op < 8);
10816}
10817
10818/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
10819 then set condition code flags based on the result of the operation.
10820 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
10821 to the high bit of T1.
10822 Returns zero if the opcode is valid. */
10823
10824static int
39d5492a
PM
10825gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
10826 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
10827{
10828 int logic_cc;
10829
10830 logic_cc = 0;
10831 switch (op) {
10832 case 0: /* and */
396e467c 10833 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
10834 logic_cc = conds;
10835 break;
10836 case 1: /* bic */
f669df27 10837 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
10838 logic_cc = conds;
10839 break;
10840 case 2: /* orr */
396e467c 10841 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
10842 logic_cc = conds;
10843 break;
10844 case 3: /* orn */
29501f1b 10845 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
10846 logic_cc = conds;
10847 break;
10848 case 4: /* eor */
396e467c 10849 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
10850 logic_cc = conds;
10851 break;
10852 case 8: /* add */
10853 if (conds)
72485ec4 10854 gen_add_CC(t0, t0, t1);
9ee6e8bb 10855 else
396e467c 10856 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
10857 break;
10858 case 10: /* adc */
10859 if (conds)
49b4c31e 10860 gen_adc_CC(t0, t0, t1);
9ee6e8bb 10861 else
396e467c 10862 gen_adc(t0, t1);
9ee6e8bb
PB
10863 break;
10864 case 11: /* sbc */
2de68a49
RH
10865 if (conds) {
10866 gen_sbc_CC(t0, t0, t1);
10867 } else {
396e467c 10868 gen_sub_carry(t0, t0, t1);
2de68a49 10869 }
9ee6e8bb
PB
10870 break;
10871 case 13: /* sub */
10872 if (conds)
72485ec4 10873 gen_sub_CC(t0, t0, t1);
9ee6e8bb 10874 else
396e467c 10875 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
10876 break;
10877 case 14: /* rsb */
10878 if (conds)
72485ec4 10879 gen_sub_CC(t0, t1, t0);
9ee6e8bb 10880 else
396e467c 10881 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
10882 break;
10883 default: /* 5, 6, 7, 9, 12, 15. */
10884 return 1;
10885 }
10886 if (logic_cc) {
396e467c 10887 gen_logic_CC(t0);
9ee6e8bb 10888 if (shifter_out)
396e467c 10889 gen_set_CF_bit31(t1);
9ee6e8bb
PB
10890 }
10891 return 0;
10892}
10893
2eea841c
PM
10894/* Translate a 32-bit thumb instruction. */
10895static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 10896{
296e5a0a 10897 uint32_t imm, shift, offset;
9ee6e8bb 10898 uint32_t rd, rn, rm, rs;
39d5492a
PM
10899 TCGv_i32 tmp;
10900 TCGv_i32 tmp2;
10901 TCGv_i32 tmp3;
10902 TCGv_i32 addr;
a7812ae4 10903 TCGv_i64 tmp64;
9ee6e8bb
PB
10904 int op;
10905 int shiftop;
10906 int conds;
10907 int logic_cc;
10908
14120108
JS
10909 /*
10910 * ARMv6-M supports a limited subset of Thumb2 instructions.
10911 * Other Thumb1 architectures allow only 32-bit
10912 * combined BL/BLX prefix and suffix.
296e5a0a 10913 */
14120108
JS
10914 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10915 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10916 int i;
10917 bool found = false;
8297cb13
JS
10918 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10919 0xf3b08040 /* dsb */,
10920 0xf3b08050 /* dmb */,
10921 0xf3b08060 /* isb */,
10922 0xf3e08000 /* mrs */,
10923 0xf000d000 /* bl */};
10924 static const uint32_t armv6m_mask[] = {0xffe0d000,
10925 0xfff0d0f0,
10926 0xfff0d0f0,
10927 0xfff0d0f0,
10928 0xffe0d000,
10929 0xf800d000};
14120108
JS
10930
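        /*
         * Illustrative note (not in the original source): each pair is a
         * value/mask match, i.e. the insn is accepted when
         * (insn & armv6m_mask[i]) == armv6m_insn[i].  For example DSB SY,
         * 0xf3bf8f4f, gives 0xf3bf8f4f & 0xfff0d0f0 == 0xf3b08040.
         */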
10931 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10932 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10933 found = true;
10934 break;
10935 }
10936 }
10937 if (!found) {
10938 goto illegal_op;
10939 }
10940 } else if ((insn & 0xf800e800) != 0xf000e800) {
9ee6e8bb
PB
10941 ARCH(6T2);
10942 }
10943
10944 rn = (insn >> 16) & 0xf;
10945 rs = (insn >> 12) & 0xf;
10946 rd = (insn >> 8) & 0xf;
10947 rm = insn & 0xf;
10948 switch ((insn >> 25) & 0xf) {
10949 case 0: case 1: case 2: case 3:
10950 /* 16-bit instructions. Should never happen. */
10951 abort();
10952 case 4:
10953 if (insn & (1 << 22)) {
ebfe27c5
PM
10954 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10955 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 10956 * table branch, TT.
ebfe27c5 10957 */
76eff04d
PM
10958 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10959 arm_dc_feature(s, ARM_FEATURE_V8)) {
10960 /* 0b1110_1001_0111_1111_1110_1001_0111_1111
10961 * - SG (v8M only)
10962 * The bulk of the behaviour for this instruction is implemented
10963 * in v7m_handle_execute_nsc(), which deals with the insn when
10964 * it is executed by a CPU in non-secure state from memory
10965 * which is Secure & NonSecure-Callable.
10966 * Here we only need to handle the remaining cases:
10967 * * in NS memory (including the "security extension not
10968 * implemented" case): NOP
10969 * * in S memory but CPU already secure (clear IT bits)
10970 * We know that the attribute for the memory this insn is
10971 * in must match the current CPU state, because otherwise
10972 * get_phys_addr_pmsav8 would have generated an exception.
10973 */
10974 if (s->v8m_secure) {
10975 /* Like the IT insn, we don't need to generate any code */
10976 s->condexec_cond = 0;
10977 s->condexec_mask = 0;
10978 }
10979 } else if (insn & 0x01200000) {
ebfe27c5
PM
10980 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10981 * - load/store dual (post-indexed)
10982 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10983 * - load/store dual (literal and immediate)
10984 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10985 * - load/store dual (pre-indexed)
10986 */
910d7692
PM
10987 bool wback = extract32(insn, 21, 1);
10988
9ee6e8bb 10989 if (rn == 15) {
ebfe27c5
PM
10990 if (insn & (1 << 21)) {
10991 /* UNPREDICTABLE */
10992 goto illegal_op;
10993 }
7d1b0095 10994 addr = tcg_temp_new_i32();
b0109805 10995 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 10996 } else {
b0109805 10997 addr = load_reg(s, rn);
9ee6e8bb
PB
10998 }
10999 offset = (insn & 0xff) * 4;
910d7692 11000 if ((insn & (1 << 23)) == 0) {
9ee6e8bb 11001 offset = -offset;
910d7692
PM
11002 }
11003
11004 if (s->v8m_stackcheck && rn == 13 && wback) {
11005 /*
11006 * Here 'addr' is the current SP; if offset is +ve we're
11007 * moving SP up, else down. It is UNKNOWN whether the limit
11008 * check triggers when SP starts below the limit and ends
11009 * up above it; check whichever of the current and final
11010 * SP is lower, so QEMU will trigger in that situation.
11011 */
11012 if ((int32_t)offset < 0) {
11013 TCGv_i32 newsp = tcg_temp_new_i32();
11014
11015 tcg_gen_addi_i32(newsp, addr, offset);
11016 gen_helper_v8m_stackcheck(cpu_env, newsp);
11017 tcg_temp_free_i32(newsp);
11018 } else {
11019 gen_helper_v8m_stackcheck(cpu_env, addr);
11020 }
11021 }
11022
9ee6e8bb 11023 if (insn & (1 << 24)) {
b0109805 11024 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
11025 offset = 0;
11026 }
11027 if (insn & (1 << 20)) {
11028 /* ldrd */
e2592fad 11029 tmp = tcg_temp_new_i32();
12dcc321 11030 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
11031 store_reg(s, rs, tmp);
11032 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 11033 tmp = tcg_temp_new_i32();
12dcc321 11034 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11035 store_reg(s, rd, tmp);
9ee6e8bb
PB
11036 } else {
11037 /* strd */
b0109805 11038 tmp = load_reg(s, rs);
12dcc321 11039 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 11040 tcg_temp_free_i32(tmp);
b0109805
PB
11041 tcg_gen_addi_i32(addr, addr, 4);
11042 tmp = load_reg(s, rd);
12dcc321 11043 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 11044 tcg_temp_free_i32(tmp);
9ee6e8bb 11045 }
910d7692 11046 if (wback) {
9ee6e8bb 11047 /* Base writeback. */
b0109805
PB
11048 tcg_gen_addi_i32(addr, addr, offset - 4);
11049 store_reg(s, rn, addr);
11050 } else {
7d1b0095 11051 tcg_temp_free_i32(addr);
9ee6e8bb
PB
11052 }
11053 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
11054 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
11055 * - load/store exclusive word
5158de24 11056 * - TT (v8M only)
ebfe27c5
PM
11057 */
11058 if (rs == 15) {
5158de24
PM
11059 if (!(insn & (1 << 20)) &&
11060 arm_dc_feature(s, ARM_FEATURE_M) &&
11061 arm_dc_feature(s, ARM_FEATURE_V8)) {
11062 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
11063 * - TT (v8M only)
11064 */
11065 bool alt = insn & (1 << 7);
11066 TCGv_i32 addr, op, ttresp;
11067
11068 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
11069 /* we UNDEF for these UNPREDICTABLE cases */
11070 goto illegal_op;
11071 }
11072
11073 if (alt && !s->v8m_secure) {
11074 goto illegal_op;
11075 }
11076
11077 addr = load_reg(s, rn);
11078 op = tcg_const_i32(extract32(insn, 6, 2));
11079 ttresp = tcg_temp_new_i32();
11080 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
11081 tcg_temp_free_i32(addr);
11082 tcg_temp_free_i32(op);
11083 store_reg(s, rd, ttresp);
384c6c03 11084 break;
5158de24 11085 }
ebfe27c5
PM
11086 goto illegal_op;
11087 }
39d5492a 11088 addr = tcg_temp_local_new_i32();
98a46317 11089 load_reg_var(s, addr, rn);
426f5abc 11090 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 11091 if (insn & (1 << 20)) {
426f5abc 11092 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 11093 } else {
426f5abc 11094 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 11095 }
39d5492a 11096 tcg_temp_free_i32(addr);
2359bf80 11097 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
11098 /* Table Branch. */
11099 if (rn == 15) {
7d1b0095 11100 addr = tcg_temp_new_i32();
b0109805 11101 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 11102 } else {
b0109805 11103 addr = load_reg(s, rn);
9ee6e8bb 11104 }
b26eefb6 11105 tmp = load_reg(s, rm);
b0109805 11106 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
11107 if (insn & (1 << 4)) {
11108 /* tbh */
b0109805 11109 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11110 tcg_temp_free_i32(tmp);
e2592fad 11111 tmp = tcg_temp_new_i32();
12dcc321 11112 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 11113 } else { /* tbb */
7d1b0095 11114 tcg_temp_free_i32(tmp);
e2592fad 11115 tmp = tcg_temp_new_i32();
12dcc321 11116 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 11117 }
7d1b0095 11118 tcg_temp_free_i32(addr);
b0109805
PB
11119 tcg_gen_shli_i32(tmp, tmp, 1);
11120 tcg_gen_addi_i32(tmp, tmp, s->pc);
11121 store_reg(s, 15, tmp);
9ee6e8bb 11122 } else {
96c55295
PM
11123 bool is_lasr = false;
11124 bool is_ld = extract32(insn, 20, 1);
2359bf80 11125 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 11126 op = (insn >> 4) & 0x3;
2359bf80
MR
11127 switch (op2) {
11128 case 0:
426f5abc 11129 goto illegal_op;
2359bf80
MR
11130 case 1:
11131 /* Load/store exclusive byte/halfword/doubleword */
11132 if (op == 2) {
11133 goto illegal_op;
11134 }
11135 ARCH(7);
11136 break;
11137 case 2:
11138 /* Load-acquire/store-release */
11139 if (op == 3) {
11140 goto illegal_op;
11141 }
11142 /* Fall through */
11143 case 3:
11144 /* Load-acquire/store-release exclusive */
11145 ARCH(8);
96c55295 11146 is_lasr = true;
2359bf80 11147 break;
426f5abc 11148 }
96c55295
PM
11149
11150 if (is_lasr && !is_ld) {
11151 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
11152 }
11153
39d5492a 11154 addr = tcg_temp_local_new_i32();
98a46317 11155 load_reg_var(s, addr, rn);
2359bf80 11156 if (!(op2 & 1)) {
96c55295 11157 if (is_ld) {
2359bf80
MR
11158 tmp = tcg_temp_new_i32();
11159 switch (op) {
11160 case 0: /* ldab */
9bb6558a
PM
11161 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
11162 rs | ISSIsAcqRel);
2359bf80
MR
11163 break;
11164 case 1: /* ldah */
9bb6558a
PM
11165 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
11166 rs | ISSIsAcqRel);
2359bf80
MR
11167 break;
11168 case 2: /* lda */
9bb6558a
PM
11169 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11170 rs | ISSIsAcqRel);
2359bf80
MR
11171 break;
11172 default:
11173 abort();
11174 }
11175 store_reg(s, rs, tmp);
11176 } else {
11177 tmp = load_reg(s, rs);
11178 switch (op) {
11179 case 0: /* stlb */
9bb6558a
PM
11180 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
11181 rs | ISSIsAcqRel);
2359bf80
MR
11182 break;
11183 case 1: /* stlh */
9bb6558a
PM
11184 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
11185 rs | ISSIsAcqRel);
2359bf80
MR
11186 break;
11187 case 2: /* stl */
9bb6558a
PM
11188 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
11189 rs | ISSIsAcqRel);
2359bf80
MR
11190 break;
11191 default:
11192 abort();
11193 }
11194 tcg_temp_free_i32(tmp);
11195 }
96c55295 11196 } else if (is_ld) {
426f5abc 11197 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 11198 } else {
426f5abc 11199 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 11200 }
39d5492a 11201 tcg_temp_free_i32(addr);
96c55295
PM
11202
11203 if (is_lasr && is_ld) {
11204 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
11205 }
9ee6e8bb
PB
11206 }
11207 } else {
11208 /* Load/store multiple, RFE, SRS. */
11209 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 11210 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 11211 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11212 goto illegal_op;
00115976 11213 }
9ee6e8bb
PB
11214 if (insn & (1 << 20)) {
11215 /* rfe */
b0109805
PB
11216 addr = load_reg(s, rn);
11217 if ((insn & (1 << 24)) == 0)
11218 tcg_gen_addi_i32(addr, addr, -8);
11219 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 11220 tmp = tcg_temp_new_i32();
12dcc321 11221 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11222 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 11223 tmp2 = tcg_temp_new_i32();
12dcc321 11224 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
11225 if (insn & (1 << 21)) {
11226 /* Base writeback. */
b0109805
PB
11227 if (insn & (1 << 24)) {
11228 tcg_gen_addi_i32(addr, addr, 4);
11229 } else {
11230 tcg_gen_addi_i32(addr, addr, -4);
11231 }
11232 store_reg(s, rn, addr);
11233 } else {
7d1b0095 11234 tcg_temp_free_i32(addr);
9ee6e8bb 11235 }
b0109805 11236 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
11237 } else {
11238 /* srs */
81465888
PM
11239 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
11240 insn & (1 << 21));
9ee6e8bb
PB
11241 }
11242 } else {
5856d44e 11243 int i, loaded_base = 0;
39d5492a 11244 TCGv_i32 loaded_var;
7c0ed88e 11245 bool wback = extract32(insn, 21, 1);
9ee6e8bb 11246 /* Load/store multiple. */
b0109805 11247 addr = load_reg(s, rn);
9ee6e8bb
PB
11248 offset = 0;
11249 for (i = 0; i < 16; i++) {
11250 if (insn & (1 << i))
11251 offset += 4;
11252 }
7c0ed88e 11253
9ee6e8bb 11254 if (insn & (1 << 24)) {
b0109805 11255 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
11256 }
11257
7c0ed88e
PM
11258 if (s->v8m_stackcheck && rn == 13 && wback) {
11259 /*
11260 * If the writeback is incrementing SP rather than
11261 * decrementing it, and the initial SP is below the
11262 * stack limit but the final written-back SP would
11263 * be above, then we must not perform any memory
11264 * accesses, but it is IMPDEF whether we generate
11265 * an exception. We choose to do so in this case.
11266 * At this point 'addr' is the lowest address, so
11267 * either the original SP (if incrementing) or our
11268 * final SP (if decrementing), so that's what we check.
11269 */
11270 gen_helper_v8m_stackcheck(cpu_env, addr);
11271 }
11272
f764718d 11273 loaded_var = NULL;
9ee6e8bb
PB
11274 for (i = 0; i < 16; i++) {
11275 if ((insn & (1 << i)) == 0)
11276 continue;
11277 if (insn & (1 << 20)) {
11278 /* Load. */
e2592fad 11279 tmp = tcg_temp_new_i32();
12dcc321 11280 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 11281 if (i == 15) {
3bb8a96f 11282 gen_bx_excret(s, tmp);
5856d44e
YO
11283 } else if (i == rn) {
11284 loaded_var = tmp;
11285 loaded_base = 1;
9ee6e8bb 11286 } else {
b0109805 11287 store_reg(s, i, tmp);
9ee6e8bb
PB
11288 }
11289 } else {
11290 /* Store. */
b0109805 11291 tmp = load_reg(s, i);
12dcc321 11292 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 11293 tcg_temp_free_i32(tmp);
9ee6e8bb 11294 }
b0109805 11295 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 11296 }
5856d44e
YO
11297 if (loaded_base) {
11298 store_reg(s, rn, loaded_var);
11299 }
7c0ed88e 11300 if (wback) {
9ee6e8bb
PB
11301 /* Base register writeback. */
11302 if (insn & (1 << 24)) {
b0109805 11303 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
11304 }
11305 /* Fault if writeback register is in register list. */
11306 if (insn & (1 << rn))
11307 goto illegal_op;
b0109805
PB
11308 store_reg(s, rn, addr);
11309 } else {
7d1b0095 11310 tcg_temp_free_i32(addr);
9ee6e8bb
PB
11311 }
11312 }
11313 }
11314 break;
2af9ab77
JB
11315 case 5:
11316
9ee6e8bb 11317 op = (insn >> 21) & 0xf;
2af9ab77 11318 if (op == 6) {
62b44f05
AR
11319 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11320 goto illegal_op;
11321 }
2af9ab77
JB
11322 /* Halfword pack. */
11323 tmp = load_reg(s, rn);
11324 tmp2 = load_reg(s, rm);
11325 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
11326 if (insn & (1 << 5)) {
11327 /* pkhtb */
11328 if (shift == 0)
11329 shift = 31;
11330 tcg_gen_sari_i32(tmp2, tmp2, shift);
11331 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
11332 tcg_gen_ext16u_i32(tmp2, tmp2);
11333 } else {
11334 /* pkhbt */
11335 if (shift)
11336 tcg_gen_shli_i32(tmp2, tmp2, shift);
11337 tcg_gen_ext16u_i32(tmp, tmp);
11338 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
11339 }
11340 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 11341 tcg_temp_free_i32(tmp2);
3174f8e9
FN
11342 store_reg(s, rd, tmp);
11343 } else {
2af9ab77
JB
11344 /* Data processing register constant shift. */
11345 if (rn == 15) {
7d1b0095 11346 tmp = tcg_temp_new_i32();
2af9ab77
JB
11347 tcg_gen_movi_i32(tmp, 0);
11348 } else {
11349 tmp = load_reg(s, rn);
11350 }
11351 tmp2 = load_reg(s, rm);
11352
11353 shiftop = (insn >> 4) & 3;
11354 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
11355 conds = (insn & (1 << 20)) != 0;
11356 logic_cc = (conds && thumb2_logic_op(op));
11357 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
11358 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
11359 goto illegal_op;
7d1b0095 11360 tcg_temp_free_i32(tmp2);
55203189
PM
11361 if (rd == 13 &&
11362 ((op == 2 && rn == 15) ||
11363 (op == 8 && rn == 13) ||
11364 (op == 13 && rn == 13))) {
11365 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
11366 store_sp_checked(s, tmp);
11367 } else if (rd != 15) {
2af9ab77
JB
11368 store_reg(s, rd, tmp);
11369 } else {
7d1b0095 11370 tcg_temp_free_i32(tmp);
2af9ab77 11371 }
3174f8e9 11372 }
9ee6e8bb
PB
11373 break;
11374 case 13: /* Misc data processing. */
11375 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
11376 if (op < 4 && (insn & 0xf000) != 0xf000)
11377 goto illegal_op;
11378 switch (op) {
11379 case 0: /* Register controlled shift. */
8984bd2e
PB
11380 tmp = load_reg(s, rn);
11381 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
11382 if ((insn & 0x70) != 0)
11383 goto illegal_op;
a2d12f0f
PM
11384 /*
11385 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
11386 * - MOV, MOVS (register-shifted register), flagsetting
11387 */
9ee6e8bb 11388 op = (insn >> 21) & 3;
8984bd2e
PB
11389 logic_cc = (insn & (1 << 20)) != 0;
11390 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
11391 if (logic_cc)
11392 gen_logic_CC(tmp);
bedb8a6b 11393 store_reg(s, rd, tmp);
9ee6e8bb
PB
11394 break;
11395 case 1: /* Sign/zero extend. */
62b44f05
AR
11396 op = (insn >> 20) & 7;
11397 switch (op) {
11398 case 0: /* SXTAH, SXTH */
11399 case 1: /* UXTAH, UXTH */
11400 case 4: /* SXTAB, SXTB */
11401 case 5: /* UXTAB, UXTB */
11402 break;
11403 case 2: /* SXTAB16, SXTB16 */
11404 case 3: /* UXTAB16, UXTB16 */
11405 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11406 goto illegal_op;
11407 }
11408 break;
11409 default:
11410 goto illegal_op;
11411 }
11412 if (rn != 15) {
11413 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11414 goto illegal_op;
11415 }
11416 }
5e3f878a 11417 tmp = load_reg(s, rm);
9ee6e8bb 11418 shift = (insn >> 4) & 3;
1301f322 11419 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
11420 rotate, a shift is sufficient. */
11421 if (shift != 0)
f669df27 11422 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
11423 op = (insn >> 20) & 7;
11424 switch (op) {
5e3f878a
PB
11425 case 0: gen_sxth(tmp); break;
11426 case 1: gen_uxth(tmp); break;
11427 case 2: gen_sxtb16(tmp); break;
11428 case 3: gen_uxtb16(tmp); break;
11429 case 4: gen_sxtb(tmp); break;
11430 case 5: gen_uxtb(tmp); break;
62b44f05
AR
11431 default:
11432 g_assert_not_reached();
9ee6e8bb
PB
11433 }
11434 if (rn != 15) {
5e3f878a 11435 tmp2 = load_reg(s, rn);
9ee6e8bb 11436 if ((op >> 1) == 1) {
5e3f878a 11437 gen_add16(tmp, tmp2);
9ee6e8bb 11438 } else {
5e3f878a 11439 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11440 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11441 }
11442 }
5e3f878a 11443 store_reg(s, rd, tmp);
9ee6e8bb
PB
11444 break;
11445 case 2: /* SIMD add/subtract. */
62b44f05
AR
11446 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11447 goto illegal_op;
11448 }
9ee6e8bb
PB
11449 op = (insn >> 20) & 7;
11450 shift = (insn >> 4) & 7;
11451 if ((op & 3) == 3 || (shift & 3) == 3)
11452 goto illegal_op;
6ddbc6e4
PB
11453 tmp = load_reg(s, rn);
11454 tmp2 = load_reg(s, rm);
11455 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 11456 tcg_temp_free_i32(tmp2);
6ddbc6e4 11457 store_reg(s, rd, tmp);
9ee6e8bb
PB
11458 break;
11459 case 3: /* Other data processing. */
11460 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
11461 if (op < 4) {
11462 /* Saturating add/subtract. */
62b44f05
AR
11463 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11464 goto illegal_op;
11465 }
d9ba4830
PB
11466 tmp = load_reg(s, rn);
11467 tmp2 = load_reg(s, rm);
9ee6e8bb 11468 if (op & 1)
9ef39277 11469 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 11470 if (op & 2)
9ef39277 11471 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 11472 else
9ef39277 11473 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 11474 tcg_temp_free_i32(tmp2);
9ee6e8bb 11475 } else {
62b44f05
AR
11476 switch (op) {
11477 case 0x0a: /* rbit */
11478 case 0x08: /* rev */
11479 case 0x09: /* rev16 */
11480 case 0x0b: /* revsh */
11481 case 0x18: /* clz */
11482 break;
11483 case 0x10: /* sel */
11484 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11485 goto illegal_op;
11486 }
11487 break;
11488 case 0x20: /* crc32/crc32c */
11489 case 0x21:
11490 case 0x22:
11491 case 0x28:
11492 case 0x29:
11493 case 0x2a:
962fcbf2 11494 if (!dc_isar_feature(aa32_crc32, s)) {
62b44f05
AR
11495 goto illegal_op;
11496 }
11497 break;
11498 default:
11499 goto illegal_op;
11500 }
d9ba4830 11501 tmp = load_reg(s, rn);
9ee6e8bb
PB
11502 switch (op) {
11503 case 0x0a: /* rbit */
d9ba4830 11504 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
11505 break;
11506 case 0x08: /* rev */
66896cb8 11507 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
11508 break;
11509 case 0x09: /* rev16 */
d9ba4830 11510 gen_rev16(tmp);
9ee6e8bb
PB
11511 break;
11512 case 0x0b: /* revsh */
d9ba4830 11513 gen_revsh(tmp);
9ee6e8bb
PB
11514 break;
11515 case 0x10: /* sel */
d9ba4830 11516 tmp2 = load_reg(s, rm);
7d1b0095 11517 tmp3 = tcg_temp_new_i32();
0ecb72a5 11518 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 11519 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
11520 tcg_temp_free_i32(tmp3);
11521 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11522 break;
11523 case 0x18: /* clz */
7539a012 11524 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 11525 break;
eb0ecd5a
WN
11526 case 0x20:
11527 case 0x21:
11528 case 0x22:
11529 case 0x28:
11530 case 0x29:
11531 case 0x2a:
11532 {
11533 /* crc32/crc32c */
11534 uint32_t sz = op & 0x3;
11535 uint32_t c = op & 0x8;
11536
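                /*
                 * Illustrative note (not in the original source): sz
                 * selects byte/halfword/word input, the operand is
                 * masked to match below, and 1 << sz (the input width
                 * in bytes) is what the crc32/crc32c helpers receive.
                 */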
eb0ecd5a 11537 tmp2 = load_reg(s, rm);
aa633469
PM
11538 if (sz == 0) {
11539 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
11540 } else if (sz == 1) {
11541 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
11542 }
eb0ecd5a
WN
11543 tmp3 = tcg_const_i32(1 << sz);
11544 if (c) {
11545 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
11546 } else {
11547 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
11548 }
11549 tcg_temp_free_i32(tmp2);
11550 tcg_temp_free_i32(tmp3);
11551 break;
11552 }
9ee6e8bb 11553 default:
62b44f05 11554 g_assert_not_reached();
9ee6e8bb
PB
11555 }
11556 }
d9ba4830 11557 store_reg(s, rd, tmp);
9ee6e8bb
PB
11558 break;
11559 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
11560 switch ((insn >> 20) & 7) {
11561 case 0: /* 32 x 32 -> 32 */
11562 case 7: /* Unsigned sum of absolute differences. */
11563 break;
11564 case 1: /* 16 x 16 -> 32 */
11565 case 2: /* Dual multiply add. */
11566 case 3: /* 32 * 16 -> 32msb */
11567 case 4: /* Dual multiply subtract. */
11568 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11569 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11570 goto illegal_op;
11571 }
11572 break;
11573 }
9ee6e8bb 11574 op = (insn >> 4) & 0xf;
d9ba4830
PB
11575 tmp = load_reg(s, rn);
11576 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
11577 switch ((insn >> 20) & 7) {
11578 case 0: /* 32 x 32 -> 32 */
d9ba4830 11579 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 11580 tcg_temp_free_i32(tmp2);
9ee6e8bb 11581 if (rs != 15) {
d9ba4830 11582 tmp2 = load_reg(s, rs);
9ee6e8bb 11583 if (op)
d9ba4830 11584 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 11585 else
d9ba4830 11586 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11587 tcg_temp_free_i32(tmp2);
9ee6e8bb 11588 }
9ee6e8bb
PB
11589 break;
11590 case 1: /* 16 x 16 -> 32 */
d9ba4830 11591 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 11592 tcg_temp_free_i32(tmp2);
9ee6e8bb 11593 if (rs != 15) {
d9ba4830 11594 tmp2 = load_reg(s, rs);
9ef39277 11595 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 11596 tcg_temp_free_i32(tmp2);
9ee6e8bb 11597 }
9ee6e8bb
PB
11598 break;
11599 case 2: /* Dual multiply add. */
11600 case 4: /* Dual multiply subtract. */
11601 if (op)
d9ba4830
PB
11602 gen_swap_half(tmp2);
11603 gen_smul_dual(tmp, tmp2);
9ee6e8bb 11604 if (insn & (1 << 22)) {
e1d177b9 11605 /* This subtraction cannot overflow. */
d9ba4830 11606 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11607 } else {
e1d177b9
PM
11608 /* This addition cannot overflow 32 bits;
11609 * however it may overflow considered as a signed
11610 * operation, in which case we must set the Q flag.
11611 */
9ef39277 11612 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 11613 }
7d1b0095 11614 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11615 if (rs != 15)
11616 {
d9ba4830 11617 tmp2 = load_reg(s, rs);
9ef39277 11618 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 11619 tcg_temp_free_i32(tmp2);
9ee6e8bb 11620 }
9ee6e8bb
PB
11621 break;
11622 case 3: /* 32 * 16 -> 32msb */
11623 if (op)
d9ba4830 11624 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 11625 else
d9ba4830 11626 gen_sxth(tmp2);
a7812ae4
PB
11627 tmp64 = gen_muls_i64_i32(tmp, tmp2);
11628 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 11629 tmp = tcg_temp_new_i32();
ecc7b3aa 11630 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 11631 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
11632 if (rs != 15)
11633 {
d9ba4830 11634 tmp2 = load_reg(s, rs);
9ef39277 11635 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 11636 tcg_temp_free_i32(tmp2);
9ee6e8bb 11637 }
9ee6e8bb 11638 break;
838fa72d
AJ
11639 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11640 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 11641 if (rs != 15) {
838fa72d
AJ
11642 tmp = load_reg(s, rs);
11643 if (insn & (1 << 20)) {
11644 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 11645 } else {
838fa72d 11646 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 11647 }
2c0262af 11648 }
838fa72d
AJ
11649 if (insn & (1 << 4)) {
11650 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
11651 }
11652 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 11653 tmp = tcg_temp_new_i32();
ecc7b3aa 11654 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 11655 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
11656 break;
11657 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 11658 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 11659 tcg_temp_free_i32(tmp2);
9ee6e8bb 11660 if (rs != 15) {
d9ba4830
PB
11661 tmp2 = load_reg(s, rs);
11662 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11663 tcg_temp_free_i32(tmp2);
5fd46862 11664 }
9ee6e8bb 11665 break;
2c0262af 11666 }
d9ba4830 11667 store_reg(s, rd, tmp);
2c0262af 11668 break;
9ee6e8bb
PB
11669 case 6: case 7: /* 64-bit multiply, Divide. */
11670 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
11671 tmp = load_reg(s, rn);
11672 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
11673 if ((op & 0x50) == 0x10) {
11674 /* sdiv, udiv */
7e0cf8b4 11675 if (!dc_isar_feature(thumb_div, s)) {
9ee6e8bb 11676 goto illegal_op;
47789990 11677 }
9ee6e8bb 11678 if (op & 0x20)
5e3f878a 11679 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 11680 else
5e3f878a 11681 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 11682 tcg_temp_free_i32(tmp2);
5e3f878a 11683 store_reg(s, rd, tmp);
9ee6e8bb
PB
11684 } else if ((op & 0xe) == 0xc) {
11685 /* Dual multiply accumulate long. */
62b44f05
AR
11686 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11687 tcg_temp_free_i32(tmp);
11688 tcg_temp_free_i32(tmp2);
11689 goto illegal_op;
11690 }
9ee6e8bb 11691 if (op & 1)
5e3f878a
PB
11692 gen_swap_half(tmp2);
11693 gen_smul_dual(tmp, tmp2);
9ee6e8bb 11694 if (op & 0x10) {
5e3f878a 11695 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 11696 } else {
5e3f878a 11697 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 11698 }
7d1b0095 11699 tcg_temp_free_i32(tmp2);
a7812ae4
PB
11700 /* BUGFIX */
11701 tmp64 = tcg_temp_new_i64();
11702 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 11703 tcg_temp_free_i32(tmp);
a7812ae4
PB
11704 gen_addq(s, tmp64, rs, rd);
11705 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 11706 tcg_temp_free_i64(tmp64);
2c0262af 11707 } else {
9ee6e8bb
PB
11708 if (op & 0x20) {
11709 /* Unsigned 64-bit multiply */
a7812ae4 11710 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 11711 } else {
9ee6e8bb
PB
11712 if (op & 8) {
11713 /* smlalxy */
62b44f05
AR
11714 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11715 tcg_temp_free_i32(tmp2);
11716 tcg_temp_free_i32(tmp);
11717 goto illegal_op;
11718 }
5e3f878a 11719 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 11720 tcg_temp_free_i32(tmp2);
a7812ae4
PB
11721 tmp64 = tcg_temp_new_i64();
11722 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 11723 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11724 } else {
11725 /* Signed 64-bit multiply */
a7812ae4 11726 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 11727 }
b5ff1b31 11728 }
9ee6e8bb
PB
11729 if (op & 4) {
11730 /* umaal */
62b44f05
AR
11731 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11732 tcg_temp_free_i64(tmp64);
11733 goto illegal_op;
11734 }
a7812ae4
PB
11735 gen_addq_lo(s, tmp64, rs);
11736 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
11737 } else if (op & 0x40) {
11738 /* 64-bit accumulate. */
a7812ae4 11739 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 11740 }
a7812ae4 11741 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 11742 tcg_temp_free_i64(tmp64);
5fd46862 11743 }
2c0262af 11744 break;
9ee6e8bb
PB
11745 }
11746 break;
11747 case 6: case 7: case 14: case 15:
11748 /* Coprocessor. */
7517748e 11749 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8859ba3c
PM
11750 /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
11751 if (extract32(insn, 24, 2) == 3) {
11752 goto illegal_op; /* op0 = 0b11 : unallocated */
11753 }
11754
11755 /*
11756 * Decode VLLDM and VLSTM first: these are nonstandard because:
11757 * * if there is no FPU then these insns must NOP in
11758 * Secure state and UNDEF in Nonsecure state
11759 * * if there is an FPU then these insns do not have
11760 * the usual behaviour that disas_vfp_insn() provides of
11761 * being controlled by CPACR/NSACR enable bits or the
11762 * lazy-stacking logic.
7517748e 11763 */
b1e5336a
PM
11764 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
11765 (insn & 0xffa00f00) == 0xec200a00) {
11766 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
11767 * - VLLDM, VLSTM
11768 * We choose to UNDEF if the RAZ bits are non-zero.
11769 */
11770 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
11771 goto illegal_op;
11772 }
019076b0
PM
11773
11774 if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
11775 TCGv_i32 fptr = load_reg(s, rn);
11776
11777 if (extract32(insn, 20, 1)) {
956fe143 11778 gen_helper_v7m_vlldm(cpu_env, fptr);
019076b0
PM
11779 } else {
11780 gen_helper_v7m_vlstm(cpu_env, fptr);
11781 }
11782 tcg_temp_free_i32(fptr);
11783
11784 /* End the TB, because we have updated FP control bits */
11785 s->base.is_jmp = DISAS_UPDATE;
11786 }
b1e5336a
PM
11787 break;
11788 }
8859ba3c
PM
11789 if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
11790 ((insn >> 8) & 0xe) == 10) {
11791 /* FP, and the CPU supports it */
11792 if (disas_vfp_insn(s, insn)) {
11793 goto illegal_op;
11794 }
11795 break;
11796 }
11797
b1e5336a 11798 /* All other insns: NOCP */
7517748e
PM
11799 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
11800 default_exception_el(s));
11801 break;
11802 }
0052087e
RH
11803 if ((insn & 0xfe000a00) == 0xfc000800
11804 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11805 /* The Thumb2 and ARM encodings are identical. */
11806 if (disas_neon_insn_3same_ext(s, insn)) {
11807 goto illegal_op;
11808 }
11809 } else if ((insn & 0xff000a00) == 0xfe000800
11810 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11811 /* The Thumb2 and ARM encodings are identical. */
11812 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
11813 goto illegal_op;
11814 }
11815 } else if (((insn >> 24) & 3) == 3) {
9ee6e8bb 11816 /* Translate into the equivalent ARM encoding. */
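            /*
             * Illustrative note (not in the original source): the mask
             * 0xe2ffffff clears bits [28:26] and [24]; the old bit [28]
             * (0xef.. vs 0xff..) lands at bit [24] and bit [28] is then
             * set, so the Thumb top byte 0xef/0xff becomes the ARM Neon
             * data-processing prefix 0xf2/0xf3.
             */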
f06053e3 11817 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 11818 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 11819 goto illegal_op;
7dcc1f89 11820 }
6a57f3eb 11821 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 11822 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
11823 goto illegal_op;
11824 }
9ee6e8bb
PB
11825 } else {
11826 if (insn & (1 << 28))
11827 goto illegal_op;
7dcc1f89 11828 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 11829 goto illegal_op;
7dcc1f89 11830 }
9ee6e8bb
PB
11831 }
11832 break;
11833 case 8: case 9: case 10: case 11:
11834 if (insn & (1 << 15)) {
11835 /* Branches, misc control. */
11836 if (insn & 0x5000) {
11837 /* Unconditional branch. */
11838 /* signextend(hw1[10:0]) -> offset[:12]. */
11839 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
11840 /* hw1[10:0] -> offset[11:1]. */
11841 offset |= (insn & 0x7ff) << 1;
11842 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
11843 offset[24:22] already have the same value because of the
11844 sign extension above. */
11845 offset ^= ((~insn) & (1 << 13)) << 10;
11846 offset ^= ((~insn) & (1 << 11)) << 11;
11847
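            /*
             * Illustrative note (not in the original source): offset[23]
             * and offset[22] start out equal to S via the sign extension,
             * so XORing them with ~J1 (insn[13]) and ~J2 (insn[11])
             * yields I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S), as the
             * BL/BLX encoding specifies.
             */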
9ee6e8bb
PB
11848 if (insn & (1 << 14)) {
11849 /* Branch and link. */
3174f8e9 11850 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 11851 }
3b46e624 11852
b0109805 11853 offset += s->pc;
9ee6e8bb
PB
11854 if (insn & (1 << 12)) {
11855 /* b/bl */
b0109805 11856 gen_jmp(s, offset);
9ee6e8bb
PB
11857 } else {
11858 /* blx */
b0109805 11859 offset &= ~(uint32_t)2;
be5e7a76 11860 /* thumb2 bx, no need to check */
b0109805 11861 gen_bx_im(s, offset);
2c0262af 11862 }
9ee6e8bb
PB
11863 } else if (((insn >> 23) & 7) == 7) {
11864 /* Misc control */
11865 if (insn & (1 << 13))
11866 goto illegal_op;
11867
11868 if (insn & (1 << 26)) {
001b3cab
PM
11869 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11870 goto illegal_op;
11871 }
37e6456e
PM
11872 if (!(insn & (1 << 20))) {
11873 /* Hypervisor call (v7) */
11874 int imm16 = extract32(insn, 16, 4) << 12
11875 | extract32(insn, 0, 12);
11876 ARCH(7);
11877 if (IS_USER(s)) {
11878 goto illegal_op;
11879 }
11880 gen_hvc(s, imm16);
11881 } else {
11882 /* Secure monitor call (v6+) */
11883 ARCH(6K);
11884 if (IS_USER(s)) {
11885 goto illegal_op;
11886 }
11887 gen_smc(s);
11888 }
2c0262af 11889 } else {
9ee6e8bb
PB
11890 op = (insn >> 20) & 7;
11891 switch (op) {
11892 case 0: /* msr cpsr. */
b53d8923 11893 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 11894 tmp = load_reg(s, rn);
b28b3377
PM
11895 /* the constant is the mask and SYSm fields */
11896 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 11897 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 11898 tcg_temp_free_i32(addr);
7d1b0095 11899 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11900 gen_lookup_tb(s);
11901 break;
11902 }
11903 /* fall through */
11904 case 1: /* msr spsr. */
b53d8923 11905 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11906 goto illegal_op;
b53d8923 11907 }
8bfd0550
PM
11908
11909 if (extract32(insn, 5, 1)) {
11910 /* MSR (banked) */
11911 int sysm = extract32(insn, 8, 4) |
11912 (extract32(insn, 4, 1) << 4);
11913 int r = op & 1;
11914
11915 gen_msr_banked(s, r, sysm, rm);
11916 break;
11917 }
11918
11919 /* MSR (for PSRs) */
2fbac54b
FN
11920 tmp = load_reg(s, rn);
11921 if (gen_set_psr(s,
7dcc1f89 11922 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 11923 op == 1, tmp))
9ee6e8bb
PB
11924 goto illegal_op;
11925 break;
11926 case 2: /* cps, nop-hint. */
11927 if (((insn >> 8) & 7) == 0) {
11928 gen_nop_hint(s, insn & 0xff);
11929 }
11930 /* Implemented as NOP in user mode. */
11931 if (IS_USER(s))
11932 break;
11933 offset = 0;
11934 imm = 0;
11935 if (insn & (1 << 10)) {
11936 if (insn & (1 << 7))
11937 offset |= CPSR_A;
11938 if (insn & (1 << 6))
11939 offset |= CPSR_I;
11940 if (insn & (1 << 5))
11941 offset |= CPSR_F;
11942 if (insn & (1 << 9))
11943 imm = CPSR_A | CPSR_I | CPSR_F;
11944 }
11945 if (insn & (1 << 8)) {
11946 offset |= 0x1f;
11947 imm |= (insn & 0x1f);
11948 }
11949 if (offset) {
2fbac54b 11950 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
11951 }
11952 break;
11953 case 3: /* Special control operations. */
14120108 11954 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
8297cb13 11955 !arm_dc_feature(s, ARM_FEATURE_M)) {
14120108
JS
11956 goto illegal_op;
11957 }
9ee6e8bb
PB
11958 op = (insn >> 4) & 0xf;
11959 switch (op) {
11960 case 2: /* clrex */
426f5abc 11961 gen_clrex(s);
9ee6e8bb
PB
11962 break;
11963 case 4: /* dsb */
11964 case 5: /* dmb */
61e4c432 11965 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 11966 break;
6df99dec
SS
11967 case 6: /* isb */
11968 /* We need to break the TB after this insn
11969 * to execute self-modifying code correctly
11970 * and also to take any pending interrupts
11971 * immediately.
11972 */
0b609cc1 11973 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 11974 break;
9888bd1e
RH
11975 case 7: /* sb */
11976 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
11977 goto illegal_op;
11978 }
11979 /*
11980 * TODO: There is no speculation barrier opcode
11981 * for TCG; MB and end the TB instead.
11982 */
11983 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
11984 gen_goto_tb(s, 0, s->pc & ~1);
11985 break;
9ee6e8bb
PB
11986 default:
11987 goto illegal_op;
11988 }
11989 break;
11990 case 4: /* bxj */
9d7c59c8
PM
11991 /* Trivial implementation equivalent to bx.
11992 * This instruction doesn't exist at all for M-profile.
11993 */
11994 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11995 goto illegal_op;
11996 }
d9ba4830
PB
11997 tmp = load_reg(s, rn);
11998 gen_bx(s, tmp);
9ee6e8bb
PB
11999 break;
12000 case 5: /* Exception return. */
b8b45b68
RV
12001 if (IS_USER(s)) {
12002 goto illegal_op;
12003 }
12004 if (rn != 14 || rd != 15) {
12005 goto illegal_op;
12006 }
55c544ed
PM
12007 if (s->current_el == 2) {
12008 /* ERET from Hyp uses ELR_Hyp, not LR */
12009 if (insn & 0xff) {
12010 goto illegal_op;
12011 }
12012 tmp = load_cpu_field(elr_el[2]);
12013 } else {
12014 tmp = load_reg(s, rn);
12015 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
12016 }
b8b45b68
RV
12017 gen_exception_return(s, tmp);
12018 break;
8bfd0550 12019 case 6: /* MRS */
43ac6574
PM
12020 if (extract32(insn, 5, 1) &&
12021 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
12022 /* MRS (banked) */
12023 int sysm = extract32(insn, 16, 4) |
12024 (extract32(insn, 4, 1) << 4);
12025
12026 gen_mrs_banked(s, 0, sysm, rd);
12027 break;
12028 }
12029
3d54026f
PM
12030 if (extract32(insn, 16, 4) != 0xf) {
12031 goto illegal_op;
12032 }
12033 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
12034 extract32(insn, 0, 8) != 0) {
12035 goto illegal_op;
12036 }
12037
8bfd0550 12038 /* mrs cpsr */
7d1b0095 12039 tmp = tcg_temp_new_i32();
b53d8923 12040 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
12041 addr = tcg_const_i32(insn & 0xff);
12042 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 12043 tcg_temp_free_i32(addr);
9ee6e8bb 12044 } else {
9ef39277 12045 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 12046 }
8984bd2e 12047 store_reg(s, rd, tmp);
9ee6e8bb 12048 break;
8bfd0550 12049 case 7: /* MRS */
43ac6574
PM
12050 if (extract32(insn, 5, 1) &&
12051 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
12052 /* MRS (banked) */
12053 int sysm = extract32(insn, 16, 4) |
12054 (extract32(insn, 4, 1) << 4);
12055
12056 gen_mrs_banked(s, 1, sysm, rd);
12057 break;
12058 }
12059
12060 /* mrs spsr. */
9ee6e8bb 12061 /* Not accessible in user mode. */
b53d8923 12062 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 12063 goto illegal_op;
b53d8923 12064 }
3d54026f
PM
12065
12066 if (extract32(insn, 16, 4) != 0xf ||
12067 extract32(insn, 0, 8) != 0) {
12068 goto illegal_op;
12069 }
12070
d9ba4830
PB
12071 tmp = load_cpu_field(spsr);
12072 store_reg(s, rd, tmp);
9ee6e8bb 12073 break;
2c0262af
FB
12074 }
12075 }
9ee6e8bb
PB
12076 } else {
12077 /* Conditional branch. */
12078 op = (insn >> 22) & 0xf;
12079 /* Generate a conditional jump to next instruction. */
c2d9644e 12080 arm_skip_unless(s, op);
9ee6e8bb
PB
12081
12082 /* offset[11:1] = insn[10:0] */
12083 offset = (insn & 0x7ff) << 1;
12084 /* offset[17:12] = insn[21:16]. */
12085 offset |= (insn & 0x003f0000) >> 4;
12086 /* offset[31:20] = insn[26]. */
12087 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
12088 /* offset[18] = insn[13]. */
12089 offset |= (insn & (1 << 13)) << 5;
12090 /* offset[19] = insn[11]. */
12091 offset |= (insn & (1 << 11)) << 8;
12092
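            /*
             * Illustrative note (not in the original source): unlike the
             * unconditional form, the J bits are used directly here:
             * offset = SignExtend(S:J2:J1:imm6:imm11:'0'), with
             * J1 = insn[13] -> offset[18] and J2 = insn[11] -> offset[19].
             */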
12093 /* jump to the offset */
b0109805 12094 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
12095 }
12096 } else {
55203189
PM
12097 /*
12098 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
12099 * - Data-processing (modified immediate, plain binary immediate)
12100 */
9ee6e8bb 12101 if (insn & (1 << 25)) {
55203189
PM
12102 /*
12103 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
12104 * - Data-processing (plain binary immediate)
12105 */
9ee6e8bb
PB
12106 if (insn & (1 << 24)) {
12107 if (insn & (1 << 20))
12108 goto illegal_op;
12109 /* Bitfield/Saturate. */
12110 op = (insn >> 21) & 7;
12111 imm = insn & 0x1f;
12112 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 12113 if (rn == 15) {
7d1b0095 12114 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
12115 tcg_gen_movi_i32(tmp, 0);
12116 } else {
12117 tmp = load_reg(s, rn);
12118 }
9ee6e8bb
PB
12119 switch (op) {
12120 case 2: /* Signed bitfield extract. */
12121 imm++;
12122 if (shift + imm > 32)
12123 goto illegal_op;
59a71b4c
RH
12124 if (imm < 32) {
12125 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
12126 }
9ee6e8bb
PB
12127 break;
12128 case 6: /* Unsigned bitfield extract. */
12129 imm++;
12130 if (shift + imm > 32)
12131 goto illegal_op;
59a71b4c
RH
12132 if (imm < 32) {
12133 tcg_gen_extract_i32(tmp, tmp, shift, imm);
12134 }
9ee6e8bb
PB
12135 break;
12136 case 3: /* Bitfield insert/clear. */
12137 if (imm < shift)
12138 goto illegal_op;
12139 imm = imm + 1 - shift;
12140 if (imm != 32) {
6ddbc6e4 12141 tmp2 = load_reg(s, rd);
d593c48e 12142 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 12143 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
12144 }
12145 break;
12146 case 7:
12147 goto illegal_op;
12148 default: /* Saturate. */
9ee6e8bb
PB
12149 if (shift) {
12150 if (op & 1)
6ddbc6e4 12151 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 12152 else
6ddbc6e4 12153 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 12154 }
6ddbc6e4 12155 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
12156 if (op & 4) {
12157 /* Unsigned. */
62b44f05
AR
12158 if ((op & 1) && shift == 0) {
12159 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
12160 tcg_temp_free_i32(tmp);
12161 tcg_temp_free_i32(tmp2);
12162 goto illegal_op;
12163 }
9ef39277 12164 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 12165 } else {
9ef39277 12166 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 12167 }
2c0262af 12168 } else {
9ee6e8bb 12169 /* Signed. */
62b44f05
AR
12170 if ((op & 1) && shift == 0) {
12171 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
12172 tcg_temp_free_i32(tmp);
12173 tcg_temp_free_i32(tmp2);
12174 goto illegal_op;
12175 }
9ef39277 12176 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 12177 } else {
9ef39277 12178 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 12179 }
2c0262af 12180 }
b75263d6 12181 tcg_temp_free_i32(tmp2);
9ee6e8bb 12182 break;
2c0262af 12183 }
6ddbc6e4 12184 store_reg(s, rd, tmp);
9ee6e8bb
PB
12185 } else {
12186 imm = ((insn & 0x04000000) >> 15)
12187 | ((insn & 0x7000) >> 4) | (insn & 0xff);
12188 if (insn & (1 << 22)) {
12189 /* 16-bit immediate. */
12190 imm |= (insn >> 4) & 0xf000;
12191 if (insn & (1 << 23)) {
12192 /* movt */
5e3f878a 12193 tmp = load_reg(s, rd);
86831435 12194 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 12195 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 12196 } else {
9ee6e8bb 12197 /* movw */
7d1b0095 12198 tmp = tcg_temp_new_i32();
5e3f878a 12199 tcg_gen_movi_i32(tmp, imm);
2c0262af 12200 }
55203189 12201 store_reg(s, rd, tmp);
2c0262af 12202 } else {
9ee6e8bb
PB
12203 /* Add/sub 12-bit immediate. */
12204 if (rn == 15) {
b0109805 12205 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 12206 if (insn & (1 << 23))
b0109805 12207 offset -= imm;
9ee6e8bb 12208 else
b0109805 12209 offset += imm;
7d1b0095 12210 tmp = tcg_temp_new_i32();
5e3f878a 12211 tcg_gen_movi_i32(tmp, offset);
55203189 12212 store_reg(s, rd, tmp);
2c0262af 12213 } else {
5e3f878a 12214 tmp = load_reg(s, rn);
9ee6e8bb 12215 if (insn & (1 << 23))
5e3f878a 12216 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 12217 else
5e3f878a 12218 tcg_gen_addi_i32(tmp, tmp, imm);
55203189
PM
12219 if (rn == 13 && rd == 13) {
12220 /* ADD SP, SP, imm or SUB SP, SP, imm */
12221 store_sp_checked(s, tmp);
12222 } else {
12223 store_reg(s, rd, tmp);
12224 }
2c0262af 12225 }
9ee6e8bb 12226 }
191abaa2 12227 }
9ee6e8bb 12228 } else {
55203189
PM
12229 /*
12230 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
12231 * - Data-processing (modified immediate)
12232 */
9ee6e8bb
PB
12233 int shifter_out = 0;
12234 /* modified 12-bit immediate. */
12235 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
12236 imm = (insn & 0xff);
12237 switch (shift) {
12238 case 0: /* XY */
12239 /* Nothing to do. */
12240 break;
12241 case 1: /* 00XY00XY */
12242 imm |= imm << 16;
12243 break;
12244 case 2: /* XY00XY00 */
12245 imm |= imm << 16;
12246 imm <<= 8;
12247 break;
12248 case 3: /* XYXYXYXY */
12249 imm |= imm << 16;
12250 imm |= imm << 8;
12251 break;
12252 default: /* Rotated constant. */
12253 shift = (shift << 1) | (imm >> 7);
12254 imm |= 0x80;
12255 imm = imm << (32 - shift);
12256 shifter_out = 1;
12257 break;
b5ff1b31 12258 }
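            /*
             * Illustrative worked example (not in the original source):
             * for imm8 = 0x25, case 1 yields 0x00250025, case 2 yields
             * 0x25002500 and case 3 yields 0x25252525.  For the rotated
             * form with i:imm3 = 0b1000 and imm8[7] = 0 the rotation is
             * 16, giving 0x00a50000.  A standalone C sketch of this
             * expansion follows disas_thumb2_insn() below.
             */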
7d1b0095 12259 tmp2 = tcg_temp_new_i32();
3174f8e9 12260 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 12261 rn = (insn >> 16) & 0xf;
3174f8e9 12262 if (rn == 15) {
7d1b0095 12263 tmp = tcg_temp_new_i32();
3174f8e9
FN
12264 tcg_gen_movi_i32(tmp, 0);
12265 } else {
12266 tmp = load_reg(s, rn);
12267 }
9ee6e8bb
PB
12268 op = (insn >> 21) & 0xf;
12269 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 12270 shifter_out, tmp, tmp2))
9ee6e8bb 12271 goto illegal_op;
7d1b0095 12272 tcg_temp_free_i32(tmp2);
9ee6e8bb 12273 rd = (insn >> 8) & 0xf;
55203189
PM
12274 if (rd == 13 && rn == 13
12275 && (op == 8 || op == 13)) {
12276 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
12277 store_sp_checked(s, tmp);
12278 } else if (rd != 15) {
3174f8e9
FN
12279 store_reg(s, rd, tmp);
12280 } else {
7d1b0095 12281 tcg_temp_free_i32(tmp);
2c0262af 12282 }
2c0262af 12283 }
9ee6e8bb
PB
12284 }
12285 break;
12286 case 12: /* Load/store single data item. */
12287 {
12288 int postinc = 0;
12289 int writeback = 0;
a99caa48 12290 int memidx;
9bb6558a
PM
12291 ISSInfo issinfo;
12292
9ee6e8bb 12293 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 12294 if (disas_neon_ls_insn(s, insn)) {
c1713132 12295 goto illegal_op;
7dcc1f89 12296 }
9ee6e8bb
PB
12297 break;
12298 }
a2fdc890
PM
12299 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
12300 if (rs == 15) {
12301 if (!(insn & (1 << 20))) {
12302 goto illegal_op;
12303 }
12304 if (op != 2) {
12305 /* Byte or halfword load space with dest == r15: memory hints.
12306 * Catch them early so we don't emit pointless addressing code.
12307 * This space is a mix of:
12308 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
12309 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
12310 * cores)
12311 * unallocated hints, which must be treated as NOPs
12312 * UNPREDICTABLE space, which we NOP or UNDEF depending on
12313 * which is easiest for the decoding logic
12314 * Some space which must UNDEF
12315 */
12316 int op1 = (insn >> 23) & 3;
12317 int op2 = (insn >> 6) & 0x3f;
12318 if (op & 2) {
12319 goto illegal_op;
12320 }
12321 if (rn == 15) {
02afbf64
PM
12322 /* UNPREDICTABLE, unallocated hint or
12323 * PLD/PLDW/PLI (literal)
12324 */
2eea841c 12325 return;
a2fdc890
PM
12326 }
12327 if (op1 & 1) {
2eea841c 12328 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
12329 }
12330 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 12331 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
12332 }
12333 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 12334 goto illegal_op;
a2fdc890
PM
12335 }
12336 }
a99caa48 12337 memidx = get_mem_index(s);
9ee6e8bb 12338 if (rn == 15) {
7d1b0095 12339 addr = tcg_temp_new_i32();
9ee6e8bb
PB
12340 /* PC relative. */
12341 /* s->pc has already been incremented by 4. */
12342 imm = s->pc & 0xfffffffc;
12343 if (insn & (1 << 23))
12344 imm += insn & 0xfff;
12345 else
12346 imm -= insn & 0xfff;
b0109805 12347 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 12348 } else {
b0109805 12349 addr = load_reg(s, rn);
9ee6e8bb
PB
12350 if (insn & (1 << 23)) {
12351 /* Positive offset. */
12352 imm = insn & 0xfff;
b0109805 12353 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 12354 } else {
9ee6e8bb 12355 imm = insn & 0xff;
2a0308c5
PM
12356 switch ((insn >> 8) & 0xf) {
12357 case 0x0: /* Shifted Register. */
9ee6e8bb 12358 shift = (insn >> 4) & 0xf;
2a0308c5
PM
12359 if (shift > 3) {
12360 tcg_temp_free_i32(addr);
18c9b560 12361 goto illegal_op;
2a0308c5 12362 }
b26eefb6 12363 tmp = load_reg(s, rm);
9ee6e8bb 12364 if (shift)
b26eefb6 12365 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 12366 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 12367 tcg_temp_free_i32(tmp);
9ee6e8bb 12368 break;
2a0308c5 12369 case 0xc: /* Negative offset. */
b0109805 12370 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 12371 break;
2a0308c5 12372 case 0xe: /* User privilege. */
b0109805 12373 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 12374 memidx = get_a32_user_mem_index(s);
9ee6e8bb 12375 break;
2a0308c5 12376 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
12377 imm = -imm;
12378 /* Fall through. */
2a0308c5 12379 case 0xb: /* Post-increment. */
9ee6e8bb
PB
12380 postinc = 1;
12381 writeback = 1;
12382 break;
2a0308c5 12383 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
12384 imm = -imm;
12385 /* Fall through. */
2a0308c5 12386 case 0xf: /* Pre-increment. */
9ee6e8bb
PB
12387 writeback = 1;
12388 break;
12389 default:
2a0308c5 12390 tcg_temp_free_i32(addr);
b7bcbe95 12391 goto illegal_op;
9ee6e8bb
PB
12392 }
12393 }
12394 }
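        /*
         * Illustrative note (not in the original source): in the
         * byte-offset form above, insn[11:8] selects the addressing
         * mode: 0x9/0xb are post-indexed (writeback, offset applied
         * after the access), 0xd/0xf are pre-indexed with writeback,
         * 0xc is a plain negative offset, 0xe is the unprivileged
         * LDRT/STRT-style access and 0x0 is the shifted-register form.
         */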
9bb6558a
PM
12395
12396 issinfo = writeback ? ISSInvalid : rs;
12397
0bc003ba
PM
12398 if (s->v8m_stackcheck && rn == 13 && writeback) {
12399 /*
12400 * Stackcheck. Here we know 'addr' is the current SP;
12401 * if imm is +ve we're moving SP up, else down. It is
12402 * UNKNOWN whether the limit check triggers when SP starts
12403 * below the limit and ends up above it; we choose to trigger the check.
12404 */
12405 if ((int32_t)imm < 0) {
12406 TCGv_i32 newsp = tcg_temp_new_i32();
12407
12408 tcg_gen_addi_i32(newsp, addr, imm);
12409 gen_helper_v8m_stackcheck(cpu_env, newsp);
12410 tcg_temp_free_i32(newsp);
12411 } else {
12412 gen_helper_v8m_stackcheck(cpu_env, addr);
12413 }
12414 }
12415
12416 if (writeback && !postinc) {
12417 tcg_gen_addi_i32(addr, addr, imm);
12418 }
12419
9ee6e8bb
PB
12420 if (insn & (1 << 20)) {
12421 /* Load. */
5a839c0d 12422 tmp = tcg_temp_new_i32();
a2fdc890 12423 switch (op) {
5a839c0d 12424 case 0:
9bb6558a 12425 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12426 break;
12427 case 4:
9bb6558a 12428 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12429 break;
12430 case 1:
9bb6558a 12431 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12432 break;
12433 case 5:
9bb6558a 12434 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12435 break;
12436 case 2:
9bb6558a 12437 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 12438 break;
2a0308c5 12439 default:
5a839c0d 12440 tcg_temp_free_i32(tmp);
2a0308c5
PM
12441 tcg_temp_free_i32(addr);
12442 goto illegal_op;
a2fdc890
PM
12443 }
12444 if (rs == 15) {
3bb8a96f 12445 gen_bx_excret(s, tmp);
9ee6e8bb 12446 } else {
a2fdc890 12447 store_reg(s, rs, tmp);
9ee6e8bb
PB
12448 }
12449 } else {
12450 /* Store. */
b0109805 12451 tmp = load_reg(s, rs);
9ee6e8bb 12452 switch (op) {
5a839c0d 12453 case 0:
9bb6558a 12454 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12455 break;
12456 case 1:
9bb6558a 12457 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12458 break;
12459 case 2:
9bb6558a 12460 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 12461 break;
2a0308c5 12462 default:
5a839c0d 12463 tcg_temp_free_i32(tmp);
2a0308c5
PM
12464 tcg_temp_free_i32(addr);
12465 goto illegal_op;
b7bcbe95 12466 }
5a839c0d 12467 tcg_temp_free_i32(tmp);
2c0262af 12468 }
9ee6e8bb 12469 if (postinc)
b0109805
PB
12470 tcg_gen_addi_i32(addr, addr, imm);
12471 if (writeback) {
12472 store_reg(s, rn, addr);
12473 } else {
7d1b0095 12474 tcg_temp_free_i32(addr);
b0109805 12475 }
9ee6e8bb
PB
12476 }
12477 break;
12478 default:
12479 goto illegal_op;
2c0262af 12480 }
2eea841c 12481 return;
9ee6e8bb 12482illegal_op:
2eea841c
PM
12483 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
12484 default_exception_el(s));
2c0262af
FB
12485}
12486
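/*
 * Illustrative sketch (not part of the original file; the function name
 * and parameter names are assumptions): a standalone C version of the
 * "modified 12-bit immediate" expansion performed in disas_thumb2_insn()
 * above.  'shift4' is the i:imm3 field,
 * ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12), and 'imm8' is
 * insn[7:0].
 */
static uint32_t thumb_expand_imm_sketch(uint32_t shift4, uint32_t imm8)
{
    uint32_t imm = imm8;

    switch (shift4) {
    case 0:                             /* 000000XY */
        break;
    case 1:                             /* 00XY00XY */
        imm |= imm << 16;
        break;
    case 2:                             /* XY00XY00 */
        imm = (imm | imm << 16) << 8;
        break;
    case 3:                             /* XYXYXYXY */
        imm |= imm << 16;
        imm |= imm << 8;
        break;
    default: {                          /* rotated constant */
        uint32_t rot = (shift4 << 1) | (imm8 >> 7);  /* 5-bit amount */
        imm = (imm8 | 0x80) << (32 - rot);  /* rot >= 8: no wraparound */
        break;
    }
    }
    return imm;
}
/*
 * For instance, thumb_expand_imm_sketch(1, 0xab) == 0x00ab00ab and
 * thumb_expand_imm_sketch(8, 0x25) == 0x00a50000.
 */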
296e5a0a 12487static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 12488{
296e5a0a 12489 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
12490 int32_t offset;
12491 int i;
39d5492a
PM
12492 TCGv_i32 tmp;
12493 TCGv_i32 tmp2;
12494 TCGv_i32 addr;
99c475ab 12495
99c475ab
FB
12496 switch (insn >> 12) {
12497 case 0: case 1:
396e467c 12498
99c475ab
FB
12499 rd = insn & 7;
12500 op = (insn >> 11) & 3;
12501 if (op == 3) {
a2d12f0f
PM
12502 /*
12503 * 0b0001_1xxx_xxxx_xxxx
12504 * - Add, subtract (three low registers)
12505 * - Add, subtract (two low registers and immediate)
12506 */
99c475ab 12507 rn = (insn >> 3) & 7;
396e467c 12508 tmp = load_reg(s, rn);
99c475ab
FB
12509 if (insn & (1 << 10)) {
12510 /* immediate */
7d1b0095 12511 tmp2 = tcg_temp_new_i32();
396e467c 12512 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
12513 } else {
12514 /* reg */
12515 rm = (insn >> 6) & 7;
396e467c 12516 tmp2 = load_reg(s, rm);
99c475ab 12517 }
9ee6e8bb
PB
12518 if (insn & (1 << 9)) {
12519 if (s->condexec_mask)
396e467c 12520 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 12521 else
72485ec4 12522 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
12523 } else {
12524 if (s->condexec_mask)
396e467c 12525 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 12526 else
72485ec4 12527 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 12528 }
7d1b0095 12529 tcg_temp_free_i32(tmp2);
396e467c 12530 store_reg(s, rd, tmp);
99c475ab
FB
12531 } else {
12532 /* shift immediate */
12533 rm = (insn >> 3) & 7;
12534 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
12535 tmp = load_reg(s, rm);
12536 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
12537 if (!s->condexec_mask)
12538 gen_logic_CC(tmp);
12539 store_reg(s, rd, tmp);
99c475ab
FB
12540 }
12541 break;
12542 case 2: case 3:
a2d12f0f
PM
12543 /*
12544 * 0b001x_xxxx_xxxx_xxxx
12545 * - Add, subtract, compare, move (one low register and immediate)
12546 */
99c475ab
FB
12547 op = (insn >> 11) & 3;
12548 rd = (insn >> 8) & 0x7;
396e467c 12549 if (op == 0) { /* mov */
7d1b0095 12550 tmp = tcg_temp_new_i32();
396e467c 12551 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 12552 if (!s->condexec_mask)
396e467c
FN
12553 gen_logic_CC(tmp);
12554 store_reg(s, rd, tmp);
12555 } else {
12556 tmp = load_reg(s, rd);
7d1b0095 12557 tmp2 = tcg_temp_new_i32();
396e467c
FN
12558 tcg_gen_movi_i32(tmp2, insn & 0xff);
12559 switch (op) {
12560 case 1: /* cmp */
72485ec4 12561 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
12562 tcg_temp_free_i32(tmp);
12563 tcg_temp_free_i32(tmp2);
396e467c
FN
12564 break;
12565 case 2: /* add */
12566 if (s->condexec_mask)
12567 tcg_gen_add_i32(tmp, tmp, tmp2);
12568 else
72485ec4 12569 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 12570 tcg_temp_free_i32(tmp2);
396e467c
FN
12571 store_reg(s, rd, tmp);
12572 break;
12573 case 3: /* sub */
12574 if (s->condexec_mask)
12575 tcg_gen_sub_i32(tmp, tmp, tmp2);
12576 else
72485ec4 12577 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 12578 tcg_temp_free_i32(tmp2);
396e467c
FN
12579 store_reg(s, rd, tmp);
12580 break;
12581 }
99c475ab 12582 }
99c475ab
FB
12583 break;
12584 case 4:
12585 if (insn & (1 << 11)) {
12586 rd = (insn >> 8) & 7;
5899f386
FB
12587 /* load pc-relative. Bit 1 of PC is ignored. */
12588 val = s->pc + 2 + ((insn & 0xff) * 4);
12589 val &= ~(uint32_t)2;
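            /*
             * Worked example: for a load at address 0x8002, s->pc is
             * already 0x8004 here, so val = 0x8006 + imm8 * 4 and the
             * "& ~2" realigns the base to Align(PC, 4) = 0x8004, as
             * the architecture requires for PC-relative loads.
             */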
7d1b0095 12590 addr = tcg_temp_new_i32();
b0109805 12591 tcg_gen_movi_i32(addr, val);
c40c8556 12592 tmp = tcg_temp_new_i32();
9bb6558a
PM
12593 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
12594 rd | ISSIs16Bit);
7d1b0095 12595 tcg_temp_free_i32(addr);
b0109805 12596 store_reg(s, rd, tmp);
99c475ab
FB
12597 break;
12598 }
12599 if (insn & (1 << 10)) {
ebfe27c5
PM
12600 /* 0b0100_01xx_xxxx_xxxx
12601 * - data processing extended, branch and exchange
12602 */
99c475ab
FB
12603 rd = (insn & 7) | ((insn >> 4) & 8);
12604 rm = (insn >> 3) & 0xf;
12605 op = (insn >> 8) & 3;
12606 switch (op) {
12607 case 0: /* add */
396e467c
FN
12608 tmp = load_reg(s, rd);
12609 tmp2 = load_reg(s, rm);
12610 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 12611 tcg_temp_free_i32(tmp2);
55203189
PM
12612 if (rd == 13) {
12613 /* ADD SP, SP, reg */
12614 store_sp_checked(s, tmp);
12615 } else {
12616 store_reg(s, rd, tmp);
12617 }
99c475ab
FB
12618 break;
12619 case 1: /* cmp */
396e467c
FN
12620 tmp = load_reg(s, rd);
12621 tmp2 = load_reg(s, rm);
72485ec4 12622 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
12623 tcg_temp_free_i32(tmp2);
12624 tcg_temp_free_i32(tmp);
99c475ab
FB
12625 break;
12626 case 2: /* mov/cpy */
396e467c 12627 tmp = load_reg(s, rm);
55203189
PM
12628 if (rd == 13) {
12629 /* MOV SP, reg */
12630 store_sp_checked(s, tmp);
12631 } else {
12632 store_reg(s, rd, tmp);
12633 }
99c475ab 12634 break;
ebfe27c5
PM
12635 case 3:
12636 {
12637 /* 0b0100_0111_xxxx_xxxx
12638 * - branch [and link] exchange thumb register
12639 */
12640 bool link = insn & (1 << 7);
12641
fb602cb7 12642 if (insn & 3) {
ebfe27c5
PM
12643 goto undef;
12644 }
12645 if (link) {
be5e7a76 12646 ARCH(5);
ebfe27c5 12647 }
fb602cb7
PM
12648 if ((insn & 4)) {
12649 /* BXNS/BLXNS: only exists for v8M with the
12650 * security extensions, and always UNDEF if NonSecure.
12651 * We don't implement these in the user-only mode
12652 * either (in theory you can use them from Secure User
 12653 * mode but they are too tied into system emulation).

12654 */
12655 if (!s->v8m_secure || IS_USER_ONLY) {
12656 goto undef;
12657 }
12658 if (link) {
3e3fa230 12659 gen_blxns(s, rm);
fb602cb7
PM
12660 } else {
12661 gen_bxns(s, rm);
12662 }
12663 break;
12664 }
12665 /* BLX/BX */
ebfe27c5
PM
12666 tmp = load_reg(s, rm);
12667 if (link) {
99c475ab 12668 val = (uint32_t)s->pc | 1;
7d1b0095 12669 tmp2 = tcg_temp_new_i32();
b0109805
PB
12670 tcg_gen_movi_i32(tmp2, val);
12671 store_reg(s, 14, tmp2);
3bb8a96f
PM
12672 gen_bx(s, tmp);
12673 } else {
12674 /* Only BX works as exception-return, not BLX */
12675 gen_bx_excret(s, tmp);
99c475ab 12676 }
99c475ab
FB
12677 break;
12678 }
ebfe27c5 12679 }
99c475ab
FB
12680 break;
12681 }
12682
a2d12f0f
PM
12683 /*
12684 * 0b0100_00xx_xxxx_xxxx
12685 * - Data-processing (two low registers)
12686 */
99c475ab
FB
12687 rd = insn & 7;
12688 rm = (insn >> 3) & 7;
12689 op = (insn >> 6) & 0xf;
12690 if (op == 2 || op == 3 || op == 4 || op == 7) {
12691 /* the shift/rotate ops want the operands backwards */
12692 val = rm;
12693 rm = rd;
12694 rd = val;
12695 val = 1;
12696 } else {
12697 val = 0;
12698 }
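        /*
         * After the swap above, rm names the encoded destination and
         * val == 1 records that the result of the shift/rotate group
         * lands in tmp2; the write-back code at the end of this case
         * uses val to choose between storing tmp2 to rm and tmp to rd.
         */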
12699
396e467c 12700 if (op == 9) { /* neg */
7d1b0095 12701 tmp = tcg_temp_new_i32();
396e467c
FN
12702 tcg_gen_movi_i32(tmp, 0);
12703 } else if (op != 0xf) { /* mvn doesn't read its first operand */
12704 tmp = load_reg(s, rd);
12705 } else {
f764718d 12706 tmp = NULL;
396e467c 12707 }
99c475ab 12708
396e467c 12709 tmp2 = load_reg(s, rm);
5899f386 12710 switch (op) {
99c475ab 12711 case 0x0: /* and */
396e467c 12712 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 12713 if (!s->condexec_mask)
396e467c 12714 gen_logic_CC(tmp);
99c475ab
FB
12715 break;
12716 case 0x1: /* eor */
396e467c 12717 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 12718 if (!s->condexec_mask)
396e467c 12719 gen_logic_CC(tmp);
99c475ab
FB
12720 break;
12721 case 0x2: /* lsl */
9ee6e8bb 12722 if (s->condexec_mask) {
365af80e 12723 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 12724 } else {
9ef39277 12725 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 12726 gen_logic_CC(tmp2);
9ee6e8bb 12727 }
99c475ab
FB
12728 break;
12729 case 0x3: /* lsr */
9ee6e8bb 12730 if (s->condexec_mask) {
365af80e 12731 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 12732 } else {
9ef39277 12733 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 12734 gen_logic_CC(tmp2);
9ee6e8bb 12735 }
99c475ab
FB
12736 break;
12737 case 0x4: /* asr */
9ee6e8bb 12738 if (s->condexec_mask) {
365af80e 12739 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 12740 } else {
9ef39277 12741 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 12742 gen_logic_CC(tmp2);
9ee6e8bb 12743 }
99c475ab
FB
12744 break;
12745 case 0x5: /* adc */
49b4c31e 12746 if (s->condexec_mask) {
396e467c 12747 gen_adc(tmp, tmp2);
49b4c31e
RH
12748 } else {
12749 gen_adc_CC(tmp, tmp, tmp2);
12750 }
99c475ab
FB
12751 break;
12752 case 0x6: /* sbc */
2de68a49 12753 if (s->condexec_mask) {
396e467c 12754 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
12755 } else {
12756 gen_sbc_CC(tmp, tmp, tmp2);
12757 }
99c475ab
FB
12758 break;
12759 case 0x7: /* ror */
9ee6e8bb 12760 if (s->condexec_mask) {
f669df27
AJ
12761 tcg_gen_andi_i32(tmp, tmp, 0x1f);
12762 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 12763 } else {
9ef39277 12764 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 12765 gen_logic_CC(tmp2);
9ee6e8bb 12766 }
99c475ab
FB
12767 break;
12768 case 0x8: /* tst */
396e467c
FN
12769 tcg_gen_and_i32(tmp, tmp, tmp2);
12770 gen_logic_CC(tmp);
99c475ab 12771 rd = 16;
5899f386 12772 break;
99c475ab 12773 case 0x9: /* neg */
9ee6e8bb 12774 if (s->condexec_mask)
396e467c 12775 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 12776 else
72485ec4 12777 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
12778 break;
12779 case 0xa: /* cmp */
72485ec4 12780 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
12781 rd = 16;
12782 break;
12783 case 0xb: /* cmn */
72485ec4 12784 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
12785 rd = 16;
12786 break;
12787 case 0xc: /* orr */
396e467c 12788 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 12789 if (!s->condexec_mask)
396e467c 12790 gen_logic_CC(tmp);
99c475ab
FB
12791 break;
12792 case 0xd: /* mul */
7b2919a0 12793 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 12794 if (!s->condexec_mask)
396e467c 12795 gen_logic_CC(tmp);
99c475ab
FB
12796 break;
12797 case 0xe: /* bic */
f669df27 12798 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 12799 if (!s->condexec_mask)
396e467c 12800 gen_logic_CC(tmp);
99c475ab
FB
12801 break;
12802 case 0xf: /* mvn */
396e467c 12803 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 12804 if (!s->condexec_mask)
396e467c 12805 gen_logic_CC(tmp2);
99c475ab 12806 val = 1;
5899f386 12807 rm = rd;
99c475ab
FB
12808 break;
12809 }
12810 if (rd != 16) {
396e467c
FN
12811 if (val) {
12812 store_reg(s, rm, tmp2);
12813 if (op != 0xf)
7d1b0095 12814 tcg_temp_free_i32(tmp);
396e467c
FN
12815 } else {
12816 store_reg(s, rd, tmp);
7d1b0095 12817 tcg_temp_free_i32(tmp2);
396e467c
FN
12818 }
12819 } else {
7d1b0095
PM
12820 tcg_temp_free_i32(tmp);
12821 tcg_temp_free_i32(tmp2);
99c475ab
FB
12822 }
12823 break;
12824
12825 case 5:
12826 /* load/store register offset. */
12827 rd = insn & 7;
12828 rn = (insn >> 3) & 7;
12829 rm = (insn >> 6) & 7;
12830 op = (insn >> 9) & 7;
b0109805 12831 addr = load_reg(s, rn);
b26eefb6 12832 tmp = load_reg(s, rm);
b0109805 12833 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 12834 tcg_temp_free_i32(tmp);
99c475ab 12835
c40c8556 12836 if (op < 3) { /* store */
b0109805 12837 tmp = load_reg(s, rd);
c40c8556
PM
12838 } else {
12839 tmp = tcg_temp_new_i32();
12840 }
99c475ab
FB
12841
12842 switch (op) {
12843 case 0: /* str */
9bb6558a 12844 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12845 break;
12846 case 1: /* strh */
9bb6558a 12847 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12848 break;
12849 case 2: /* strb */
9bb6558a 12850 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12851 break;
12852 case 3: /* ldrsb */
9bb6558a 12853 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12854 break;
12855 case 4: /* ldr */
9bb6558a 12856 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12857 break;
12858 case 5: /* ldrh */
9bb6558a 12859 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12860 break;
12861 case 6: /* ldrb */
9bb6558a 12862 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12863 break;
12864 case 7: /* ldrsh */
9bb6558a 12865 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12866 break;
12867 }
c40c8556 12868 if (op >= 3) { /* load */
b0109805 12869 store_reg(s, rd, tmp);
c40c8556
PM
12870 } else {
12871 tcg_temp_free_i32(tmp);
12872 }
7d1b0095 12873 tcg_temp_free_i32(addr);
99c475ab
FB
12874 break;
12875
12876 case 6:
12877 /* load/store word immediate offset */
12878 rd = insn & 7;
12879 rn = (insn >> 3) & 7;
b0109805 12880 addr = load_reg(s, rn);
99c475ab 12881 val = (insn >> 4) & 0x7c;
b0109805 12882 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12883
12884 if (insn & (1 << 11)) {
12885 /* load */
c40c8556 12886 tmp = tcg_temp_new_i32();
12dcc321 12887 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 12888 store_reg(s, rd, tmp);
99c475ab
FB
12889 } else {
12890 /* store */
b0109805 12891 tmp = load_reg(s, rd);
12dcc321 12892 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12893 tcg_temp_free_i32(tmp);
99c475ab 12894 }
7d1b0095 12895 tcg_temp_free_i32(addr);
99c475ab
FB
12896 break;
12897
12898 case 7:
12899 /* load/store byte immediate offset */
12900 rd = insn & 7;
12901 rn = (insn >> 3) & 7;
b0109805 12902 addr = load_reg(s, rn);
99c475ab 12903 val = (insn >> 6) & 0x1f;
b0109805 12904 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12905
12906 if (insn & (1 << 11)) {
12907 /* load */
c40c8556 12908 tmp = tcg_temp_new_i32();
9bb6558a 12909 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12910 store_reg(s, rd, tmp);
99c475ab
FB
12911 } else {
12912 /* store */
b0109805 12913 tmp = load_reg(s, rd);
9bb6558a 12914 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12915 tcg_temp_free_i32(tmp);
99c475ab 12916 }
7d1b0095 12917 tcg_temp_free_i32(addr);
99c475ab
FB
12918 break;
12919
12920 case 8:
12921 /* load/store halfword immediate offset */
12922 rd = insn & 7;
12923 rn = (insn >> 3) & 7;
b0109805 12924 addr = load_reg(s, rn);
99c475ab 12925 val = (insn >> 5) & 0x3e;
b0109805 12926 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12927
12928 if (insn & (1 << 11)) {
12929 /* load */
c40c8556 12930 tmp = tcg_temp_new_i32();
9bb6558a 12931 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12932 store_reg(s, rd, tmp);
99c475ab
FB
12933 } else {
12934 /* store */
b0109805 12935 tmp = load_reg(s, rd);
9bb6558a 12936 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12937 tcg_temp_free_i32(tmp);
99c475ab 12938 }
7d1b0095 12939 tcg_temp_free_i32(addr);
99c475ab
FB
12940 break;
12941
12942 case 9:
12943 /* load/store from stack */
12944 rd = (insn >> 8) & 7;
b0109805 12945 addr = load_reg(s, 13);
99c475ab 12946 val = (insn & 0xff) * 4;
b0109805 12947 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12948
12949 if (insn & (1 << 11)) {
12950 /* load */
c40c8556 12951 tmp = tcg_temp_new_i32();
9bb6558a 12952 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12953 store_reg(s, rd, tmp);
99c475ab
FB
12954 } else {
12955 /* store */
b0109805 12956 tmp = load_reg(s, rd);
9bb6558a 12957 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12958 tcg_temp_free_i32(tmp);
99c475ab 12959 }
7d1b0095 12960 tcg_temp_free_i32(addr);
99c475ab
FB
12961 break;
12962
12963 case 10:
55203189
PM
12964 /*
12965 * 0b1010_xxxx_xxxx_xxxx
12966 * - Add PC/SP (immediate)
12967 */
99c475ab 12968 rd = (insn >> 8) & 7;
5899f386
FB
12969 if (insn & (1 << 11)) {
12970 /* SP */
5e3f878a 12971 tmp = load_reg(s, 13);
5899f386
FB
12972 } else {
12973 /* PC. bit 1 is ignored. */
7d1b0095 12974 tmp = tcg_temp_new_i32();
5e3f878a 12975 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 12976 }
99c475ab 12977 val = (insn & 0xff) * 4;
5e3f878a
PB
12978 tcg_gen_addi_i32(tmp, tmp, val);
12979 store_reg(s, rd, tmp);
99c475ab
FB
12980 break;
12981
12982 case 11:
12983 /* misc */
12984 op = (insn >> 8) & 0xf;
12985 switch (op) {
12986 case 0:
55203189
PM
12987 /*
12988 * 0b1011_0000_xxxx_xxxx
12989 * - ADD (SP plus immediate)
12990 * - SUB (SP minus immediate)
12991 */
b26eefb6 12992 tmp = load_reg(s, 13);
99c475ab
FB
12993 val = (insn & 0x7f) * 4;
12994 if (insn & (1 << 7))
6a0d8a1d 12995 val = -(int32_t)val;
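            /*
             * Worked example: 0xb082 has bit 7 set and imm7 = 2, so
             * val = -(2 * 4) = -8, i.e. SUB SP, SP, #8.
             */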
b26eefb6 12996 tcg_gen_addi_i32(tmp, tmp, val);
55203189 12997 store_sp_checked(s, tmp);
99c475ab
FB
12998 break;
12999
9ee6e8bb
PB
13000 case 2: /* sign/zero extend. */
13001 ARCH(6);
13002 rd = insn & 7;
13003 rm = (insn >> 3) & 7;
b0109805 13004 tmp = load_reg(s, rm);
9ee6e8bb 13005 switch ((insn >> 6) & 3) {
b0109805
PB
13006 case 0: gen_sxth(tmp); break;
13007 case 1: gen_sxtb(tmp); break;
13008 case 2: gen_uxth(tmp); break;
13009 case 3: gen_uxtb(tmp); break;
9ee6e8bb 13010 }
b0109805 13011 store_reg(s, rd, tmp);
9ee6e8bb 13012 break;
99c475ab 13013 case 4: case 5: case 0xc: case 0xd:
aa369e5c
PM
13014 /*
13015 * 0b1011_x10x_xxxx_xxxx
13016 * - push/pop
13017 */
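            /*
             * Encoding recap: bit 11 selects pop vs push, bit 8 adds
             * PC (pop) or LR (push) to the 8-bit register list in
             * bits [7:0].  Worked example: PUSH {r0, r1, lr} is
             * 0xb503, so offset comes to 4 + 4 + 4 = 12 and SP is
             * pre-decremented by 12 before the stores.
             */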
b0109805 13018 addr = load_reg(s, 13);
5899f386
FB
13019 if (insn & (1 << 8))
13020 offset = 4;
99c475ab 13021 else
5899f386
FB
13022 offset = 0;
13023 for (i = 0; i < 8; i++) {
13024 if (insn & (1 << i))
13025 offset += 4;
13026 }
13027 if ((insn & (1 << 11)) == 0) {
b0109805 13028 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 13029 }
aa369e5c
PM
13030
13031 if (s->v8m_stackcheck) {
13032 /*
13033 * Here 'addr' is the lower of "old SP" and "new SP";
13034 * if this is a pop that starts below the limit and ends
13035 * above it, it is UNKNOWN whether the limit check triggers;
13036 * we choose to trigger.
13037 */
13038 gen_helper_v8m_stackcheck(cpu_env, addr);
13039 }
13040
99c475ab
FB
13041 for (i = 0; i < 8; i++) {
13042 if (insn & (1 << i)) {
13043 if (insn & (1 << 11)) {
13044 /* pop */
c40c8556 13045 tmp = tcg_temp_new_i32();
12dcc321 13046 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 13047 store_reg(s, i, tmp);
99c475ab
FB
13048 } else {
13049 /* push */
b0109805 13050 tmp = load_reg(s, i);
12dcc321 13051 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 13052 tcg_temp_free_i32(tmp);
99c475ab 13053 }
5899f386 13054 /* advance to the next address. */
b0109805 13055 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
13056 }
13057 }
f764718d 13058 tmp = NULL;
99c475ab
FB
13059 if (insn & (1 << 8)) {
13060 if (insn & (1 << 11)) {
13061 /* pop pc */
c40c8556 13062 tmp = tcg_temp_new_i32();
12dcc321 13063 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
13064 /* don't set the pc until the rest of the instruction
13065 has completed */
13066 } else {
13067 /* push lr */
b0109805 13068 tmp = load_reg(s, 14);
12dcc321 13069 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 13070 tcg_temp_free_i32(tmp);
99c475ab 13071 }
b0109805 13072 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 13073 }
5899f386 13074 if ((insn & (1 << 11)) == 0) {
b0109805 13075 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 13076 }
99c475ab 13077 /* write back the new stack pointer */
b0109805 13078 store_reg(s, 13, addr);
99c475ab 13079 /* set the new PC value */
be5e7a76 13080 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 13081 store_reg_from_load(s, 15, tmp);
be5e7a76 13082 }
99c475ab
FB
13083 break;
13084
9ee6e8bb
PB
 13085 case 1: case 3: case 9: case 11: /* cbz/cbnz */
13086 rm = insn & 7;
d9ba4830 13087 tmp = load_reg(s, rm);
c2d9644e 13088 arm_gen_condlabel(s);
9ee6e8bb 13089 if (insn & (1 << 11))
cb63669a 13090 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 13091 else
cb63669a 13092 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 13093 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
13094 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
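            /*
             * offset is i:imm5:'0' from the encoding.  Worked example:
             * CBZ r0, #4 is 0xb110, giving offset 4 and a branch
             * target of insn_address + 4 + 4.
             */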
13095 val = (uint32_t)s->pc + 2;
13096 val += offset;
13097 gen_jmp(s, val);
13098 break;
13099
13100 case 15: /* IT, nop-hint. */
13101 if ((insn & 0xf) == 0) {
13102 gen_nop_hint(s, (insn >> 4) & 0xf);
13103 break;
13104 }
13105 /* If Then. */
13106 s->condexec_cond = (insn >> 4) & 0xe;
13107 s->condexec_mask = insn & 0x1f;
 13108 /* No actual code generated for this insn, just set up state. */
13109 break;
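            /*
             * State packing note: condexec_cond holds the top three
             * bits of firstcond; firstcond's low bit is parked in
             * bit 4 of condexec_mask and shifted into condexec_cond
             * by the per-insn advance in thumb_tr_translate_insn().
             * Worked example: ITE EQ is 0xbf0c, giving
             * condexec_cond = 0x0 and condexec_mask = 0x0c.
             */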
13110
06c949e6 13111 case 0xe: /* bkpt */
d4a2dc67
PM
13112 {
13113 int imm8 = extract32(insn, 0, 8);
be5e7a76 13114 ARCH(5);
c900a2e6 13115 gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
06c949e6 13116 break;
d4a2dc67 13117 }
06c949e6 13118
19a6e31c
PM
13119 case 0xa: /* rev, and hlt */
13120 {
13121 int op1 = extract32(insn, 6, 2);
13122
13123 if (op1 == 2) {
13124 /* HLT */
13125 int imm6 = extract32(insn, 0, 6);
13126
13127 gen_hlt(s, imm6);
13128 break;
13129 }
13130
13131 /* Otherwise this is rev */
9ee6e8bb
PB
13132 ARCH(6);
13133 rn = (insn >> 3) & 0x7;
13134 rd = insn & 0x7;
b0109805 13135 tmp = load_reg(s, rn);
19a6e31c 13136 switch (op1) {
66896cb8 13137 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
13138 case 1: gen_rev16(tmp); break;
13139 case 3: gen_revsh(tmp); break;
19a6e31c
PM
13140 default:
13141 g_assert_not_reached();
9ee6e8bb 13142 }
b0109805 13143 store_reg(s, rd, tmp);
9ee6e8bb 13144 break;
19a6e31c 13145 }
9ee6e8bb 13146
d9e028c1
PM
13147 case 6:
13148 switch ((insn >> 5) & 7) {
13149 case 2:
13150 /* setend */
13151 ARCH(6);
9886ecdf
PB
13152 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
13153 gen_helper_setend(cpu_env);
dcba3a8d 13154 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 13155 }
9ee6e8bb 13156 break;
d9e028c1
PM
13157 case 3:
13158 /* cps */
13159 ARCH(6);
13160 if (IS_USER(s)) {
13161 break;
8984bd2e 13162 }
b53d8923 13163 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
13164 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
13165 /* FAULTMASK */
13166 if (insn & 1) {
13167 addr = tcg_const_i32(19);
13168 gen_helper_v7m_msr(cpu_env, addr, tmp);
13169 tcg_temp_free_i32(addr);
13170 }
13171 /* PRIMASK */
13172 if (insn & 2) {
13173 addr = tcg_const_i32(16);
13174 gen_helper_v7m_msr(cpu_env, addr, tmp);
13175 tcg_temp_free_i32(addr);
13176 }
13177 tcg_temp_free_i32(tmp);
13178 gen_lookup_tb(s);
13179 } else {
13180 if (insn & (1 << 4)) {
13181 shift = CPSR_A | CPSR_I | CPSR_F;
13182 } else {
13183 shift = 0;
13184 }
13185 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 13186 }
d9e028c1
PM
13187 break;
13188 default:
13189 goto undef;
9ee6e8bb
PB
13190 }
13191 break;
13192
99c475ab
FB
13193 default:
13194 goto undef;
13195 }
13196 break;
13197
13198 case 12:
a7d3970d 13199 {
99c475ab 13200 /* load/store multiple */
f764718d 13201 TCGv_i32 loaded_var = NULL;
99c475ab 13202 rn = (insn >> 8) & 0x7;
b0109805 13203 addr = load_reg(s, rn);
99c475ab
FB
13204 for (i = 0; i < 8; i++) {
13205 if (insn & (1 << i)) {
99c475ab
FB
13206 if (insn & (1 << 11)) {
13207 /* load */
c40c8556 13208 tmp = tcg_temp_new_i32();
12dcc321 13209 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
13210 if (i == rn) {
13211 loaded_var = tmp;
13212 } else {
13213 store_reg(s, i, tmp);
13214 }
99c475ab
FB
13215 } else {
13216 /* store */
b0109805 13217 tmp = load_reg(s, i);
12dcc321 13218 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 13219 tcg_temp_free_i32(tmp);
99c475ab 13220 }
5899f386 13221 /* advance to the next address */
b0109805 13222 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
13223 }
13224 }
b0109805 13225 if ((insn & (1 << rn)) == 0) {
a7d3970d 13226 /* base reg not in list: base register writeback */
b0109805
PB
13227 store_reg(s, rn, addr);
13228 } else {
a7d3970d
PM
13229 /* base reg in list: if load, complete it now */
13230 if (insn & (1 << 11)) {
13231 store_reg(s, rn, loaded_var);
13232 }
7d1b0095 13233 tcg_temp_free_i32(addr);
b0109805 13234 }
99c475ab 13235 break;
a7d3970d 13236 }
99c475ab
FB
13237 case 13:
13238 /* conditional branch or swi */
13239 cond = (insn >> 8) & 0xf;
13240 if (cond == 0xe)
13241 goto undef;
13242
13243 if (cond == 0xf) {
13244 /* swi */
eaed129d 13245 gen_set_pc_im(s, s->pc);
d4a2dc67 13246 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 13247 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
13248 break;
13249 }
13250 /* generate a conditional jump to next instruction */
c2d9644e 13251 arm_skip_unless(s, cond);
99c475ab
FB
13252
13253 /* jump to the offset */
5899f386 13254 val = (uint32_t)s->pc + 2;
99c475ab 13255 offset = ((int32_t)insn << 24) >> 24;
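        /*
         * The shift pair sign-extends the low 8 bits: e.g. imm8 0xfe
         * becomes offset -2, so after "val += offset << 1" below the
         * branch target is this instruction itself.
         */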
5899f386 13256 val += offset << 1;
8aaca4c0 13257 gen_jmp(s, val);
99c475ab
FB
13258 break;
13259
13260 case 14:
358bf29e 13261 if (insn & (1 << 11)) {
296e5a0a
PM
13262 /* thumb_insn_is_16bit() ensures we can't get here for
13263 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
13264 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
13265 */
13266 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
13267 ARCH(5);
13268 offset = ((insn & 0x7ff) << 1);
13269 tmp = load_reg(s, 14);
13270 tcg_gen_addi_i32(tmp, tmp, offset);
13271 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
13272
13273 tmp2 = tcg_temp_new_i32();
13274 tcg_gen_movi_i32(tmp2, s->pc | 1);
13275 store_reg(s, 14, tmp2);
13276 gen_bx(s, tmp);
358bf29e
PB
13277 break;
13278 }
9ee6e8bb 13279 /* unconditional branch */
99c475ab
FB
13280 val = (uint32_t)s->pc;
13281 offset = ((int32_t)insn << 21) >> 21;
13282 val += (offset << 1) + 2;
8aaca4c0 13283 gen_jmp(s, val);
99c475ab
FB
13284 break;
13285
13286 case 15:
296e5a0a
PM
13287 /* thumb_insn_is_16bit() ensures we can't get here for
13288 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
13289 */
13290 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
13291
13292 if (insn & (1 << 11)) {
13293 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
13294 offset = ((insn & 0x7ff) << 1) | 1;
13295 tmp = load_reg(s, 14);
13296 tcg_gen_addi_i32(tmp, tmp, offset);
13297
13298 tmp2 = tcg_temp_new_i32();
13299 tcg_gen_movi_i32(tmp2, s->pc | 1);
13300 store_reg(s, 14, tmp2);
13301 gen_bx(s, tmp);
13302 } else {
13303 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
13304 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
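            /*
             * Sign-extend the 11-bit immediate and shift it left 12:
             * this is the high half of the BL/BLX offset.  LR
             * temporarily holds s->pc + 2 + uoffset until the suffix
             * halfword (the bit-11-set paths in cases 14 and 15)
             * adds imm11 << 1.
             */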
13305
13306 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
13307 }
9ee6e8bb 13308 break;
99c475ab
FB
13309 }
13310 return;
9ee6e8bb 13311illegal_op:
99c475ab 13312undef:
73710361
GB
13313 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
13314 default_exception_el(s));
99c475ab
FB
13315}
13316
541ebcd4
PM
13317static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
13318{
13319 /* Return true if the insn at dc->pc might cross a page boundary.
13320 * (False positives are OK, false negatives are not.)
5b8d7289
PM
13321 * We know this is a Thumb insn, and our caller ensures we are
13322 * only called if dc->pc is less than 4 bytes from the page
13323 * boundary, so we cross the page if the first 16 bits indicate
13324 * that this is a 32 bit insn.
541ebcd4 13325 */
5b8d7289 13326 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4 13327
5b8d7289 13328 return !thumb_insn_is_16bit(s, insn);
541ebcd4
PM
13329}
13330
b542683d 13331static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2c0262af 13332{
1d8a5535 13333 DisasContext *dc = container_of(dcbase, DisasContext, base);
9c489ea6 13334 CPUARMState *env = cs->env_ptr;
2fc0cc0e 13335 ARMCPU *cpu = env_archcpu(env);
aad821ac
RH
13336 uint32_t tb_flags = dc->base.tb->flags;
13337 uint32_t condexec, core_mmu_idx;
3b46e624 13338
962fcbf2 13339 dc->isar = &cpu->isar;
dcba3a8d 13340 dc->pc = dc->base.pc_first;
e50e6a20 13341 dc->condjmp = 0;
3926cc84 13342
40f860cd 13343 dc->aarch64 = 0;
cef9ee70
SS
13344 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
13345 * there is no secure EL1, so we route exceptions to EL3.
13346 */
13347 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
13348 !arm_el_is_aa64(env, 3);
aad821ac
RH
13349 dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
13350 dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
13351 dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
13352 condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
13353 dc->condexec_mask = (condexec & 0xf) << 1;
13354 dc->condexec_cond = condexec >> 4;
13355 core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
13356 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
c1e37810 13357 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 13358#if !defined(CONFIG_USER_ONLY)
c1e37810 13359 dc->user = (dc->current_el == 0);
3926cc84 13360#endif
aad821ac
RH
13361 dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
13362 dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
13363 dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
13364 dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
ea7ac69d
PM
13365 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
13366 dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
13367 dc->vec_stride = 0;
13368 } else {
13369 dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
13370 dc->c15_cpar = 0;
13371 }
aad821ac 13372 dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
fb602cb7
PM
13373 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
13374 regime_is_secure(env, dc->mmu_idx);
aad821ac 13375 dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
6d60c67a 13376 dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
6000531e
PM
13377 dc->v7m_new_fp_ctxt_needed =
13378 FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
e33cf0f8 13379 dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
60322b39 13380 dc->cp_regs = cpu->cp_regs;
a984e42c 13381 dc->features = env->features;
40f860cd 13382
50225ad0
PM
13383 /* Single step state. The code-generation logic here is:
13384 * SS_ACTIVE == 0:
13385 * generate code with no special handling for single-stepping (except
13386 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
13387 * this happens anyway because those changes are all system register or
13388 * PSTATE writes).
13389 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
13390 * emit code for one insn
13391 * emit code to clear PSTATE.SS
13392 * emit code to generate software step exception for completed step
13393 * end TB (as usual for having generated an exception)
13394 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
13395 * emit code to generate a software step exception
13396 * end the TB
13397 */
aad821ac
RH
13398 dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
13399 dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
50225ad0
PM
13400 dc->is_ldex = false;
13401 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
13402
bfe7ad5b 13403 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
1d8a5535 13404
f7708456
RH
13405 /* If architectural single step active, limit to 1. */
13406 if (is_singlestepping(dc)) {
b542683d 13407 dc->base.max_insns = 1;
f7708456
RH
13408 }
13409
d0264d86
RH
13410 /* ARM is a fixed-length ISA. Bound the number of insns to execute
13411 to those left on the page. */
13412 if (!dc->thumb) {
bfe7ad5b 13413 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
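        /*
         * -(pc | TARGET_PAGE_MASK) is the number of bytes from pc to
         * the end of its page.  Worked example with 4K pages:
         * pc_first = 0x8f80 gives -(0xffffff80) = 0x80 bytes, i.e. a
         * bound of 32 A32 insns.
         */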
b542683d 13414 dc->base.max_insns = MIN(dc->base.max_insns, bound);
d0264d86
RH
13415 }
13416
a7812ae4
PB
13417 cpu_F0s = tcg_temp_new_i32();
13418 cpu_F1s = tcg_temp_new_i32();
13419 cpu_F0d = tcg_temp_new_i64();
13420 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
13421 cpu_V0 = cpu_F0d;
13422 cpu_V1 = cpu_F1d;
e677137d 13423 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 13424 cpu_M0 = tcg_temp_new_i64();
1d8a5535
LV
13425}
13426
b1476854
LV
13427static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
13428{
13429 DisasContext *dc = container_of(dcbase, DisasContext, base);
13430
13431 /* A note on handling of the condexec (IT) bits:
13432 *
13433 * We want to avoid the overhead of having to write the updated condexec
13434 * bits back to the CPUARMState for every instruction in an IT block. So:
13435 * (1) if the condexec bits are not already zero then we write
13436 * zero back into the CPUARMState now. This avoids complications trying
13437 * to do it at the end of the block. (For example if we don't do this
13438 * it's hard to identify whether we can safely skip writing condexec
13439 * at the end of the TB, which we definitely want to do for the case
13440 * where a TB doesn't do anything with the IT state at all.)
13441 * (2) if we are going to leave the TB then we call gen_set_condexec()
13442 * which will write the correct value into CPUARMState if zero is wrong.
13443 * This is done both for leaving the TB at the end, and for leaving
13444 * it because of an exception we know will happen, which is done in
13445 * gen_exception_insn(). The latter is necessary because we need to
13446 * leave the TB with the PC/IT state just prior to execution of the
13447 * instruction which caused the exception.
13448 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
13449 * then the CPUARMState will be wrong and we need to reset it.
13450 * This is handled in the same way as restoration of the
13451 * PC in these situations; we save the value of the condexec bits
13452 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
13453 * then uses this to restore them after an exception.
13454 *
13455 * Note that there are no instructions which can read the condexec
13456 * bits, and none which can write non-static values to them, so
13457 * we don't need to care about whether CPUARMState is correct in the
13458 * middle of a TB.
13459 */
13460
13461 /* Reset the conditional execution bits immediately. This avoids
13462 complications trying to do it at the end of the block. */
13463 if (dc->condexec_mask || dc->condexec_cond) {
13464 TCGv_i32 tmp = tcg_temp_new_i32();
13465 tcg_gen_movi_i32(tmp, 0);
13466 store_cpu_field(tmp, condexec_bits);
13467 }
13468}
13469
f62bd897
LV
13470static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
13471{
13472 DisasContext *dc = container_of(dcbase, DisasContext, base);
13473
f62bd897
LV
13474 tcg_gen_insn_start(dc->pc,
13475 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
13476 0);
15fa08f8 13477 dc->insn_start = tcg_last_op();
f62bd897
LV
13478}
13479
a68956ad
LV
13480static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
13481 const CPUBreakpoint *bp)
13482{
13483 DisasContext *dc = container_of(dcbase, DisasContext, base);
13484
13485 if (bp->flags & BP_CPU) {
13486 gen_set_condexec(dc);
13487 gen_set_pc_im(dc, dc->pc);
13488 gen_helper_check_breakpoints(cpu_env);
13489 /* End the TB early; it's likely not going to be executed */
13490 dc->base.is_jmp = DISAS_TOO_MANY;
13491 } else {
13492 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
13493 /* The address covered by the breakpoint must be
 13494 included in [tb->pc, tb->pc + tb->size) in order
 13495 for it to be properly cleared -- thus we
13496 increment the PC here so that the logic setting
13497 tb->size below does the right thing. */
13498 /* TODO: Advance PC by correct instruction length to
13499 * avoid disassembler error messages */
13500 dc->pc += 2;
13501 dc->base.is_jmp = DISAS_NORETURN;
13502 }
13503
13504 return true;
13505}
13506
722ef0a5 13507static bool arm_pre_translate_insn(DisasContext *dc)
13189a90 13508{
13189a90
LV
13509#ifdef CONFIG_USER_ONLY
13510 /* Intercept jump to the magic kernel page. */
13511 if (dc->pc >= 0xffff0000) {
13512 /* We always get here via a jump, so know we are not in a
13513 conditional execution block. */
13514 gen_exception_internal(EXCP_KERNEL_TRAP);
13515 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 13516 return true;
13189a90
LV
13517 }
13518#endif
13519
13520 if (dc->ss_active && !dc->pstate_ss) {
13521 /* Singlestep state is Active-pending.
13522 * If we're in this state at the start of a TB then either
13523 * a) we just took an exception to an EL which is being debugged
13524 * and this is the first insn in the exception handler
13525 * b) debug exceptions were masked and we just unmasked them
13526 * without changing EL (eg by clearing PSTATE.D)
13527 * In either case we're going to take a swstep exception in the
13528 * "did not step an insn" case, and so the syndrome ISV and EX
13529 * bits should be zero.
13530 */
13531 assert(dc->base.num_insns == 1);
13532 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
13533 default_exception_el(dc));
13534 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 13535 return true;
13189a90
LV
13536 }
13537
722ef0a5
RH
13538 return false;
13539}
13189a90 13540
d0264d86 13541static void arm_post_translate_insn(DisasContext *dc)
722ef0a5 13542{
13189a90
LV
13543 if (dc->condjmp && !dc->base.is_jmp) {
13544 gen_set_label(dc->condlabel);
13545 dc->condjmp = 0;
13546 }
13189a90 13547 dc->base.pc_next = dc->pc;
23169224 13548 translator_loop_temp_check(&dc->base);
13189a90
LV
13549}
13550
722ef0a5
RH
13551static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
13552{
13553 DisasContext *dc = container_of(dcbase, DisasContext, base);
13554 CPUARMState *env = cpu->env_ptr;
13555 unsigned int insn;
13556
13557 if (arm_pre_translate_insn(dc)) {
13558 return;
13559 }
13560
13561 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
58803318 13562 dc->insn = insn;
722ef0a5
RH
13563 dc->pc += 4;
13564 disas_arm_insn(dc, insn);
13565
d0264d86
RH
13566 arm_post_translate_insn(dc);
13567
13568 /* ARM is a fixed-length ISA. We performed the cross-page check
13569 in init_disas_context by adjusting max_insns. */
722ef0a5
RH
13570}
13571
dcf14dfb
PM
13572static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
13573{
13574 /* Return true if this Thumb insn is always unconditional,
13575 * even inside an IT block. This is true of only a very few
13576 * instructions: BKPT, HLT, and SG.
13577 *
13578 * A larger class of instructions are UNPREDICTABLE if used
13579 * inside an IT block; we do not need to detect those here, because
13580 * what we do by default (perform the cc check and update the IT
13581 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
13582 * choice for those situations.
13583 *
13584 * insn is either a 16-bit or a 32-bit instruction; the two are
13585 * distinguishable because for the 16-bit case the top 16 bits
13586 * are zeroes, and that isn't a valid 32-bit encoding.
13587 */
13588 if ((insn & 0xffffff00) == 0xbe00) {
13589 /* BKPT */
13590 return true;
13591 }
13592
13593 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
13594 !arm_dc_feature(s, ARM_FEATURE_M)) {
13595 /* HLT: v8A only. This is unconditional even when it is going to
13596 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
13597 * For v7 cores this was a plain old undefined encoding and so
13598 * honours its cc check. (We might be using the encoding as
13599 * a semihosting trap, but we don't change the cc check behaviour
13600 * on that account, because a debugger connected to a real v7A
13601 * core and emulating semihosting traps by catching the UNDEF
13602 * exception would also only see cases where the cc check passed.
13603 * No guest code should be trying to do a HLT semihosting trap
13604 * in an IT block anyway.
13605 */
13606 return true;
13607 }
13608
13609 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
13610 arm_dc_feature(s, ARM_FEATURE_M)) {
13611 /* SG: v8M only */
13612 return true;
13613 }
13614
13615 return false;
13616}
13617
722ef0a5
RH
13618static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
13619{
13620 DisasContext *dc = container_of(dcbase, DisasContext, base);
13621 CPUARMState *env = cpu->env_ptr;
296e5a0a
PM
13622 uint32_t insn;
13623 bool is_16bit;
722ef0a5
RH
13624
13625 if (arm_pre_translate_insn(dc)) {
13626 return;
13627 }
13628
296e5a0a
PM
13629 insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
13630 is_16bit = thumb_insn_is_16bit(dc, insn);
13631 dc->pc += 2;
13632 if (!is_16bit) {
13633 uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
13634
13635 insn = insn << 16 | insn2;
13636 dc->pc += 2;
13637 }
58803318 13638 dc->insn = insn;
296e5a0a 13639
dcf14dfb 13640 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
296e5a0a
PM
13641 uint32_t cond = dc->condexec_cond;
13642
13643 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
c2d9644e 13644 arm_skip_unless(dc, cond);
296e5a0a
PM
13645 }
13646 }
13647
13648 if (is_16bit) {
13649 disas_thumb_insn(dc, insn);
13650 } else {
2eea841c 13651 disas_thumb2_insn(dc, insn);
296e5a0a 13652 }
722ef0a5
RH
13653
13654 /* Advance the Thumb condexec condition. */
13655 if (dc->condexec_mask) {
13656 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
13657 ((dc->condexec_mask >> 4) & 1));
13658 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
13659 if (dc->condexec_mask == 0) {
13660 dc->condexec_cond = 0;
13661 }
13662 }
13663
d0264d86
RH
13664 arm_post_translate_insn(dc);
13665
13666 /* Thumb is a variable-length ISA. Stop translation when the next insn
13667 * will touch a new page. This ensures that prefetch aborts occur at
13668 * the right place.
13669 *
13670 * We want to stop the TB if the next insn starts in a new page,
13671 * or if it spans between this page and the next. This means that
13672 * if we're looking at the last halfword in the page we need to
13673 * see if it's a 16-bit Thumb insn (which will fit in this TB)
13674 * or a 32-bit Thumb insn (which won't).
13675 * This is to avoid generating a silly TB with a single 16-bit insn
13676 * in it at the end of this page (which would execute correctly
13677 * but isn't very efficient).
13678 */
13679 if (dc->base.is_jmp == DISAS_NEXT
bfe7ad5b
EC
13680 && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
13681 || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
d0264d86
RH
13682 && insn_crosses_page(env, dc)))) {
13683 dc->base.is_jmp = DISAS_TOO_MANY;
13684 }
722ef0a5
RH
13685}
13686
70d3c035 13687static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
1d8a5535 13688{
70d3c035 13689 DisasContext *dc = container_of(dcbase, DisasContext, base);
2e70f6ef 13690
c5a49c63 13691 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
70d3c035
LV
13692 /* FIXME: This can theoretically happen with self-modifying code. */
13693 cpu_abort(cpu, "IO on conditional branch instruction");
2e70f6ef 13694 }
9ee6e8bb 13695
b5ff1b31 13696 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
13697 instruction was a conditional branch or trap, and the PC has
13698 already been written. */
f021b2c4 13699 gen_set_condexec(dc);
dcba3a8d 13700 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
3bb8a96f
PM
13701 /* Exception return branches need some special case code at the
13702 * end of the TB, which is complex enough that it has to
13703 * handle the single-step vs not and the condition-failed
13704 * insn codepath itself.
13705 */
13706 gen_bx_excret_final_code(dc);
13707 } else if (unlikely(is_singlestepping(dc))) {
7999a5c8 13708 /* Unconditional and "condition passed" instruction codepath. */
dcba3a8d 13709 switch (dc->base.is_jmp) {
7999a5c8 13710 case DISAS_SWI:
50225ad0 13711 gen_ss_advance(dc);
73710361
GB
13712 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
13713 default_exception_el(dc));
7999a5c8
SF
13714 break;
13715 case DISAS_HVC:
37e6456e 13716 gen_ss_advance(dc);
73710361 13717 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
7999a5c8
SF
13718 break;
13719 case DISAS_SMC:
37e6456e 13720 gen_ss_advance(dc);
73710361 13721 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
7999a5c8
SF
13722 break;
13723 case DISAS_NEXT:
a68956ad 13724 case DISAS_TOO_MANY:
7999a5c8
SF
13725 case DISAS_UPDATE:
13726 gen_set_pc_im(dc, dc->pc);
13727 /* fall through */
13728 default:
5425415e
PM
13729 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
13730 gen_singlestep_exception(dc);
a0c231e6
RH
13731 break;
13732 case DISAS_NORETURN:
13733 break;
7999a5c8 13734 }
8aaca4c0 13735 } else {
9ee6e8bb
PB
13736 /* While branches must always occur at the end of an IT block,
13737 there are a few other things that can cause us to terminate
65626741 13738 the TB in the middle of an IT block:
9ee6e8bb
PB
13739 - Exception generating instructions (bkpt, swi, undefined).
13740 - Page boundaries.
13741 - Hardware watchpoints.
13742 Hardware breakpoints have already been handled and skip this code.
13743 */
dcba3a8d 13744 switch(dc->base.is_jmp) {
8aaca4c0 13745 case DISAS_NEXT:
a68956ad 13746 case DISAS_TOO_MANY:
6e256c93 13747 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0 13748 break;
577bf808 13749 case DISAS_JUMP:
8a6b28c7
EC
13750 gen_goto_ptr();
13751 break;
e8d52302
AB
13752 case DISAS_UPDATE:
13753 gen_set_pc_im(dc, dc->pc);
13754 /* fall through */
577bf808 13755 default:
8aaca4c0 13756 /* indicate that the hash table must be used to find the next TB */
07ea28b4 13757 tcg_gen_exit_tb(NULL, 0);
8aaca4c0 13758 break;
a0c231e6 13759 case DISAS_NORETURN:
8aaca4c0
FB
13760 /* nothing more to generate */
13761 break;
9ee6e8bb 13762 case DISAS_WFI:
58803318
SS
13763 {
13764 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
13765 !(dc->insn & (1U << 31))) ? 2 : 4);
13766
13767 gen_helper_wfi(cpu_env, tmp);
13768 tcg_temp_free_i32(tmp);
84549b6d
PM
13769 /* The helper doesn't necessarily throw an exception, but we
13770 * must go back to the main loop to check for interrupts anyway.
13771 */
07ea28b4 13772 tcg_gen_exit_tb(NULL, 0);
9ee6e8bb 13773 break;
58803318 13774 }
72c1d3af
PM
13775 case DISAS_WFE:
13776 gen_helper_wfe(cpu_env);
13777 break;
c87e5a61
PM
13778 case DISAS_YIELD:
13779 gen_helper_yield(cpu_env);
13780 break;
9ee6e8bb 13781 case DISAS_SWI:
73710361
GB
13782 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
13783 default_exception_el(dc));
9ee6e8bb 13784 break;
37e6456e 13785 case DISAS_HVC:
73710361 13786 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
13787 break;
13788 case DISAS_SMC:
73710361 13789 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
37e6456e 13790 break;
8aaca4c0 13791 }
f021b2c4
PM
13792 }
13793
13794 if (dc->condjmp) {
13795 /* "Condition failed" instruction codepath for the branch/trap insn */
13796 gen_set_label(dc->condlabel);
13797 gen_set_condexec(dc);
b636649f 13798 if (unlikely(is_singlestepping(dc))) {
f021b2c4
PM
13799 gen_set_pc_im(dc, dc->pc);
13800 gen_singlestep_exception(dc);
13801 } else {
6e256c93 13802 gen_goto_tb(dc, 1, dc->pc);
e50e6a20 13803 }
2c0262af 13804 }
23169224
LV
13805
13806 /* Functions above can change dc->pc, so re-align db->pc_next */
13807 dc->base.pc_next = dc->pc;
70d3c035
LV
13808}
13809
4013f7fc
LV
13810static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
13811{
13812 DisasContext *dc = container_of(dcbase, DisasContext, base);
13813
13814 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
1d48474d 13815 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
4013f7fc
LV
13816}
13817
23169224
LV
13818static const TranslatorOps arm_translator_ops = {
13819 .init_disas_context = arm_tr_init_disas_context,
13820 .tb_start = arm_tr_tb_start,
13821 .insn_start = arm_tr_insn_start,
13822 .breakpoint_check = arm_tr_breakpoint_check,
13823 .translate_insn = arm_tr_translate_insn,
13824 .tb_stop = arm_tr_tb_stop,
13825 .disas_log = arm_tr_disas_log,
13826};
13827
722ef0a5
RH
13828static const TranslatorOps thumb_translator_ops = {
13829 .init_disas_context = arm_tr_init_disas_context,
13830 .tb_start = arm_tr_tb_start,
13831 .insn_start = arm_tr_insn_start,
13832 .breakpoint_check = arm_tr_breakpoint_check,
13833 .translate_insn = thumb_tr_translate_insn,
13834 .tb_stop = arm_tr_tb_stop,
13835 .disas_log = arm_tr_disas_log,
13836};
13837
70d3c035 13838/* generate intermediate code for basic block 'tb'. */
8b86d6d2 13839void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
70d3c035 13840{
23169224
LV
13841 DisasContext dc;
13842 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 13843
aad821ac 13844 if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
722ef0a5
RH
13845 ops = &thumb_translator_ops;
13846 }
23169224 13847#ifdef TARGET_AARCH64
aad821ac 13848 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
23169224 13849 ops = &aarch64_translator_ops;
2c0262af
FB
13850 }
13851#endif
23169224 13852
8b86d6d2 13853 translator_loop(ops, &dc.base, cpu, tb, max_insns);
2c0262af
FB
13854}
13855
90c84c56 13856void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
2c0262af 13857{
878096ee
AF
13858 ARMCPU *cpu = ARM_CPU(cs);
13859 CPUARMState *env = &cpu->env;
2c0262af
FB
13860 int i;
13861
17731115 13862 if (is_a64(env)) {
90c84c56 13863 aarch64_cpu_dump_state(cs, f, flags);
17731115
PM
13864 return;
13865 }
13866
2c0262af 13867 for (i = 0; i < 16; i++) {
90c84c56 13868 qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 13869 if ((i % 4) == 3)
90c84c56 13870 qemu_fprintf(f, "\n");
2c0262af 13871 else
90c84c56 13872 qemu_fprintf(f, " ");
2c0262af 13873 }
06e5cf7a 13874
5b906f35
PM
13875 if (arm_feature(env, ARM_FEATURE_M)) {
13876 uint32_t xpsr = xpsr_read(env);
13877 const char *mode;
1e577cc7
PM
13878 const char *ns_status = "";
13879
13880 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
13881 ns_status = env->v7m.secure ? "S " : "NS ";
13882 }
5b906f35
PM
13883
13884 if (xpsr & XPSR_EXCP) {
13885 mode = "handler";
13886 } else {
8bfc26ea 13887 if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
5b906f35
PM
13888 mode = "unpriv-thread";
13889 } else {
13890 mode = "priv-thread";
13891 }
13892 }
13893
90c84c56
MA
13894 qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
13895 xpsr,
13896 xpsr & XPSR_N ? 'N' : '-',
13897 xpsr & XPSR_Z ? 'Z' : '-',
13898 xpsr & XPSR_C ? 'C' : '-',
13899 xpsr & XPSR_V ? 'V' : '-',
13900 xpsr & XPSR_T ? 'T' : 'A',
13901 ns_status,
13902 mode);
06e5cf7a 13903 } else {
5b906f35
PM
13904 uint32_t psr = cpsr_read(env);
13905 const char *ns_status = "";
13906
13907 if (arm_feature(env, ARM_FEATURE_EL3) &&
13908 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
13909 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
13910 }
13911
90c84c56
MA
13912 qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
13913 psr,
13914 psr & CPSR_N ? 'N' : '-',
13915 psr & CPSR_Z ? 'Z' : '-',
13916 psr & CPSR_C ? 'C' : '-',
13917 psr & CPSR_V ? 'V' : '-',
13918 psr & CPSR_T ? 'T' : 'A',
13919 ns_status,
13920 aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
5b906f35 13921 }
b7bcbe95 13922
f2617cfc
PM
13923 if (flags & CPU_DUMP_FPU) {
13924 int numvfpregs = 0;
13925 if (arm_feature(env, ARM_FEATURE_VFP)) {
13926 numvfpregs += 16;
13927 }
13928 if (arm_feature(env, ARM_FEATURE_VFP3)) {
13929 numvfpregs += 16;
13930 }
13931 for (i = 0; i < numvfpregs; i++) {
9a2b5256 13932 uint64_t v = *aa32_vfp_dreg(env, i);
90c84c56
MA
13933 qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
13934 i * 2, (uint32_t)v,
13935 i * 2 + 1, (uint32_t)(v >> 32),
13936 i, v);
f2617cfc 13937 }
90c84c56 13938 qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
b7bcbe95 13939 }
2c0262af 13940}
a6b025d3 13941
bad729e2
RH
13942void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
13943 target_ulong *data)
d2856f1a 13944{
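    /*
     * The data[] slots mirror the three words emitted by
     * tcg_gen_insn_start() in arm_tr_insn_start(): the PC, the packed
     * condexec bits, and a syndrome word that starts as 0 and may be
     * filled in while the insn is translated.
     */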
3926cc84 13945 if (is_a64(env)) {
bad729e2 13946 env->pc = data[0];
40f860cd 13947 env->condexec_bits = 0;
aaa1f954 13948 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 13949 } else {
bad729e2
RH
13950 env->regs[15] = data[0];
13951 env->condexec_bits = data[1];
aaa1f954 13952 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 13953 }
d2856f1a 13954}