/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "arm_ldst.h"
#include "exec/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}

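/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * call site composes the Rt register number with the flag bits, e.g.
 * for a 16-bit acquire load into register rt:
 *
 *     disas_set_da_iss(s, MO_UW, rt | ISSIsAcqRel | ISSIs16Bit);
 *
 * and ORs in ISSInvalid to suppress ISS when, say, writeback was used:
 *
 *     disas_set_da_iss(s, MO_UL, rt | ISSIsWrite | ISSInvalid);
 */
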
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}

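/*
 * Illustrative note, not part of the original source: for example, an
 * LDRT executed while s->mmu_idx is ARMMMUIdx_S12NSE1 (privileged
 * Non-secure EL1) is translated using the core index derived from
 * ARMMMUIdx_S12NSE0, so the access is permission-checked as if it had
 * been made at PL0, which is what the architecture requires.
 */
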
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

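/*
 * Illustrative sketch, not part of the original source: the PC-write
 * masking used by store_reg() above, restated on a host integer so the
 * alignment behaviour is easy to check in isolation.  The example_*
 * helper is hypothetical, not a QEMU API.
 */
static inline uint32_t example_pc_write_mask(uint32_t val, bool thumb)
{
    /* Thumb ignores bit 0; ARM (as implemented here) ignores bits [1:0]. */
    return val & (thumb ? ~1u : ~3u);
}
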
/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

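/*
 * Illustrative sketch, not part of the original source: the same
 * per-halfword byteswap that gen_rev16() emits, done on a host
 * integer.  E.g. 0x11223344 becomes 0x22114433.  The example_* helper
 * is hypothetical, not a QEMU API.
 */
static inline uint32_t example_rev16(uint32_t x)
{
    uint32_t hi = (x >> 8) & 0x00ff00ff;   /* high byte of each halfword */
    uint32_t lo = (x & 0x00ff00ff) << 8;   /* low byte of each halfword */
    return lo | hi;
}
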
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

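/*
 * Illustrative sketch, not part of the original source: the gen_add16()
 * trick on host integers.  Clearing bit 15 of both operands stops the
 * low halfword's carry from propagating into the high halfword; the
 * final XOR reconstructs bit 15 of the low halfword's sum.  The
 * example_* helper is hypothetical, not a QEMU API.
 */
static inline uint32_t example_add16(uint32_t t0, uint32_t t1)
{
    uint32_t tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    return (t0 + t1) ^ tmp;
}
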
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

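/*
 * Illustrative sketch, not part of the original source: the flag
 * computation that gen_add_CC()/gen_adc_CC() emit, restated with
 * 64-bit host arithmetic.  NF holds the 32-bit result (and is the ZF
 * source: Z is set iff it is zero), CF is the carry out, and VF's sign
 * bit is set on signed overflow.  The example_* helper is
 * hypothetical, not a QEMU API.
 */
static inline uint32_t example_adc_flags(uint32_t t0, uint32_t t1,
                                         uint32_t carry_in,
                                         uint32_t *cf, uint32_t *vf)
{
    uint64_t sum = (uint64_t)t0 + t1 + (carry_in & 1);
    uint32_t nf = (uint32_t)sum;
    *cf = (uint32_t)(sum >> 32);          /* unsigned carry out */
    *vf = (nf ^ t0) & ~(t0 ^ t1);         /* signed overflow in bit 31 */
    return nf;
}
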
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

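/*
 * Illustrative sketch, not part of the original source: the semantics
 * GEN_SHIFT() implements, on host integers.  ARM shift-by-register
 * uses the low byte of the shift amount, and amounts of 32..255 yield
 * 0, which is why the movcond substitutes 0 for the source before
 * shifting by (amt & 0x1f).  The example_* helper is hypothetical,
 * not a QEMU API.
 */
static inline uint32_t example_shl_by_reg(uint32_t t0, uint32_t t1)
{
    uint32_t amt = t1 & 0xff;
    uint32_t src = (amt > 0x1f) ? 0 : t0;   /* out-of-range shifts give 0 */
    return src << (amt & 0x1f);
}
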
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}

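/*
 * Illustrative sketch, not part of the original source: how the
 * conditions above read QEMU's flag representation (CF is 0 or 1, the
 * sign bits of NF and VF hold N and V, and ZF is zero exactly when Z
 * is set).  The example_* helper is hypothetical, not a QEMU API.
 */
static inline bool example_cond_passes(int cc, uint32_t nf, uint32_t zf,
                                       uint32_t cf, uint32_t vf)
{
    bool result;
    switch (cc >> 1) {
    case 0: result = (zf == 0); break;                    /* eq/ne */
    case 1: result = (cf != 0); break;                    /* cs/cc */
    case 2: result = ((int32_t)nf < 0); break;            /* mi/pl */
    case 3: result = ((int32_t)vf < 0); break;            /* vs/vc */
    case 4: result = (cf != 0 && zf != 0); break;         /* hi/ls */
    case 5: result = ((int32_t)(nf ^ vf) >= 0); break;    /* ge/lt */
    case 6: result = (zf != 0 &&
                      (int32_t)(nf ^ vf) >= 0); break;    /* gt/le */
    default: return true;                                 /* al */
    }
    return (cc & 1) ? !result : result;
}
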
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}

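/*
 * Illustrative sketch, not part of the original source: the BE32
 * address adjustment above, on a host integer.  With SCTLR.B set, a
 * sub-word access at address A is performed at A xor (4 - size), so a
 * byte load at 0x1000 really touches 0x1003.  The example_* helper is
 * hypothetical, not a QEMU API.
 */
static inline uint32_t example_be32_addr(uint32_t addr, unsigned size_log2)
{
    if (size_log2 < 2) {                    /* only 8- and 16-bit accesses */
        addr ^= 4 - (1u << size_log2);
    }
    return addr;
}
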
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}

static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)

1222static inline void gen_hvc(DisasContext *s, int imm16)
1223{
1224 /* The pre HVC helper handles cases when HVC gets trapped
1225 * as an undefined insn by runtime configuration (ie before
1226 * the insn really executes).
1227 */
1228 gen_set_pc_im(s, s->pc - 4);
1229 gen_helper_pre_hvc(cpu_env);
1230 /* Otherwise we will treat this as a real exception which
1231 * happens after execution of the insn. (The distinction matters
1232 * for the PC value reported to the exception handler and also
1233 * for single stepping.)
1234 */
1235 s->svc_imm = imm16;
1236 gen_set_pc_im(s, s->pc);
dcba3a8d 1237 s->base.is_jmp = DISAS_HVC;
37e6456e
PM
1238}
1239
1240static inline void gen_smc(DisasContext *s)
1241{
1242 /* As with HVC, we may take an exception either before or after
1243 * the insn executes.
1244 */
1245 TCGv_i32 tmp;
1246
1247 gen_set_pc_im(s, s->pc - 4);
1248 tmp = tcg_const_i32(syn_aa32_smc());
1249 gen_helper_pre_smc(cpu_env, tmp);
1250 tcg_temp_free_i32(tmp);
1251 gen_set_pc_im(s, s->pc);
dcba3a8d 1252 s->base.is_jmp = DISAS_SMC;
37e6456e
PM
1253}
1254
d4a2dc67
PM
1255static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1256{
1257 gen_set_condexec(s);
1258 gen_set_pc_im(s, s->pc - offset);
1259 gen_exception_internal(excp);
dcba3a8d 1260 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1261}
1262
73710361
GB
1263static void gen_exception_insn(DisasContext *s, int offset, int excp,
1264 int syn, uint32_t target_el)
d4a2dc67
PM
1265{
1266 gen_set_condexec(s);
1267 gen_set_pc_im(s, s->pc - offset);
73710361 1268 gen_exception(excp, syn, target_el);
dcba3a8d 1269 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1270}
1271
c900a2e6
PM
1272static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
1273{
1274 TCGv_i32 tcg_syn;
1275
1276 gen_set_condexec(s);
1277 gen_set_pc_im(s, s->pc - offset);
1278 tcg_syn = tcg_const_i32(syn);
1279 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1280 tcg_temp_free_i32(tcg_syn);
1281 s->base.is_jmp = DISAS_NORETURN;
1282}
1283
/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name)                                          \
static inline void gen_vfp_##name(int dp, int neon)                 \
{                                                                   \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                    \
    if (dp) {                                                       \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);      \
    } else {                                                        \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);      \
    }                                                               \
    tcg_temp_free_ptr(statusptr);                                   \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name)                                          \
static inline void gen_vfp_##name(int dp, int neon)                 \
{                                                                   \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                    \
    if (dp) {                                                       \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);      \
    } else {                                                        \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);      \
    }                                                               \
    tcg_temp_free_ptr(statusptr);                                   \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round)                                    \
static inline void gen_vfp_##name(int dp, int shift, int neon)      \
{                                                                   \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                      \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                    \
    if (dp) {                                                       \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr);                 \
    } else {                                                        \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr);                 \
    }                                                               \
    tcg_temp_free_i32(tmp_shift);                                   \
    tcg_temp_free_ptr(statusptr);                                   \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}

static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}

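/*
 * Illustrative note, not part of the original source: the mapping
 * above packs four single-precision registers into each zregs[]
 * element.  For example S5 (reg == 5) lives in zregs[1].d[0], in the
 * upper 32 bits because (reg & 1) is set, while D5 (dp, reg == 5)
 * lives in zregs[2].d[1].
 */
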
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}

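/*
 * Illustrative sketch, not part of the original source: the
 * big-endian-host correction used by neon_element_offset() above.
 * Elements are laid out little-endian within each 8-byte unit, so on
 * a big-endian host the byte offset within the unit is mirrored;
 * e.g. 16-bit element 0 sits at byte offset 6 rather than 0.  The
 * example_* helper is hypothetical, not a QEMU API.
 */
static inline int example_neon_element_byte_offset(int element,
                                                   int element_size,
                                                   bool host_big_endian)
{
    int ofs = element * element_size;
    if (host_big_endian && element_size < 8) {
        ofs ^= 8 - element_size;
    }
    return ofs;
}
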
39d5492a 1597static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1598{
39d5492a 1599 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1600 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1601 return tmp;
1602}
1603
2d6ac920
RH
1604static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
1605{
1606 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1607
1608 switch (mop) {
1609 case MO_UB:
1610 tcg_gen_ld8u_i32(var, cpu_env, offset);
1611 break;
1612 case MO_UW:
1613 tcg_gen_ld16u_i32(var, cpu_env, offset);
1614 break;
1615 case MO_UL:
1616 tcg_gen_ld_i32(var, cpu_env, offset);
1617 break;
1618 default:
1619 g_assert_not_reached();
1620 }
1621}
1622
ac55d007
RH
1623static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
1624{
1625 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1626
1627 switch (mop) {
1628 case MO_UB:
1629 tcg_gen_ld8u_i64(var, cpu_env, offset);
1630 break;
1631 case MO_UW:
1632 tcg_gen_ld16u_i64(var, cpu_env, offset);
1633 break;
1634 case MO_UL:
1635 tcg_gen_ld32u_i64(var, cpu_env, offset);
1636 break;
1637 case MO_Q:
1638 tcg_gen_ld_i64(var, cpu_env, offset);
1639 break;
1640 default:
1641 g_assert_not_reached();
1642 }
1643}
1644
39d5492a 1645static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1646{
1647 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1648 tcg_temp_free_i32(var);
8f8e3aa4
PB
1649}
1650
2d6ac920
RH
1651static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
1652{
1653 long offset = neon_element_offset(reg, ele, size);
1654
1655 switch (size) {
1656 case MO_8:
1657 tcg_gen_st8_i32(var, cpu_env, offset);
1658 break;
1659 case MO_16:
1660 tcg_gen_st16_i32(var, cpu_env, offset);
1661 break;
1662 case MO_32:
1663 tcg_gen_st_i32(var, cpu_env, offset);
1664 break;
1665 default:
1666 g_assert_not_reached();
1667 }
1668}
1669
ac55d007
RH
1670static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
1671{
1672 long offset = neon_element_offset(reg, ele, size);
1673
1674 switch (size) {
1675 case MO_8:
1676 tcg_gen_st8_i64(var, cpu_env, offset);
1677 break;
1678 case MO_16:
1679 tcg_gen_st16_i64(var, cpu_env, offset);
1680 break;
1681 case MO_32:
1682 tcg_gen_st32_i64(var, cpu_env, offset);
1683 break;
1684 case MO_64:
1685 tcg_gen_st_i64(var, cpu_env, offset);
1686 break;
1687 default:
1688 g_assert_not_reached();
1689 }
1690}
1691
a7812ae4 1692static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1693{
1694 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1695}
1696
a7812ae4 1697static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1698{
1699 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1700}
1701
1a66ac61
RH
1702static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1703{
1704 TCGv_ptr ret = tcg_temp_new_ptr();
1705 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1706 return ret;
1707}
1708
4373f3ce
PB
1709#define tcg_gen_ld_f32 tcg_gen_ld_i32
1710#define tcg_gen_ld_f64 tcg_gen_ld_i64
1711#define tcg_gen_st_f32 tcg_gen_st_i32
1712#define tcg_gen_st_f64 tcg_gen_st_i64
1713
b7bcbe95
FB
1714static inline void gen_mov_F0_vreg(int dp, int reg)
1715{
1716 if (dp)
4373f3ce 1717 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1718 else
4373f3ce 1719 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1720}
1721
1722static inline void gen_mov_F1_vreg(int dp, int reg)
1723{
1724 if (dp)
4373f3ce 1725 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1726 else
4373f3ce 1727 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1728}
1729
1730static inline void gen_mov_vreg_F0(int dp, int reg)
1731{
1732 if (dp)
4373f3ce 1733 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1734 else
4373f3ce 1735 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1736}
1737
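/*
 * Illustrative sketch, not part of the original file: the F0 staging
 * temporaries let a VFP register-to-register copy be written as a load
 * into F0 followed by a store; gen_vfp_copy_vreg() is a hypothetical
 * name used only for this example.
 */
static inline void gen_vfp_copy_vreg(int dp, int rd, int rm)
{
    gen_mov_F0_vreg(dp, rm);   /* F0 = vreg[rm] */
    gen_mov_vreg_F0(dp, rd);   /* vreg[rd] = F0 */
}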
d00584b7 1738#define ARM_CP_RW_BIT (1 << 20)
18c9b560 1739
a7812ae4 1740static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1741{
0ecb72a5 1742 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1743}
1744
a7812ae4 1745static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1746{
0ecb72a5 1747 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1748}
1749
39d5492a 1750static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1751{
39d5492a 1752 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1753 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1754 return var;
1755}
1756
39d5492a 1757static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1758{
0ecb72a5 1759 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1760 tcg_temp_free_i32(var);
1761}
1762
1763static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1764{
1765 iwmmxt_store_reg(cpu_M0, rn);
1766}
1767
1768static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1769{
1770 iwmmxt_load_reg(cpu_M0, rn);
1771}
1772
1773static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1774{
1775 iwmmxt_load_reg(cpu_V1, rn);
1776 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1777}
1778
1779static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1780{
1781 iwmmxt_load_reg(cpu_V1, rn);
1782 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1783}
1784
1785static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1786{
1787 iwmmxt_load_reg(cpu_V1, rn);
1788 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1789}
1790
1791#define IWMMXT_OP(name) \
1792static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1793{ \
1794 iwmmxt_load_reg(cpu_V1, rn); \
1795 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1796}
1797
1798#define IWMMXT_OP_ENV(name) \
1799static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1800{ \
1801 iwmmxt_load_reg(cpu_V1, rn); \
1802 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1803}
1804
1805#define IWMMXT_OP_ENV_SIZE(name) \
1806IWMMXT_OP_ENV(name##b) \
1807IWMMXT_OP_ENV(name##w) \
1808IWMMXT_OP_ENV(name##l)
e677137d 1809
477955bd 1810#define IWMMXT_OP_ENV1(name) \
1811static inline void gen_op_iwmmxt_##name##_M0(void) \
1812{ \
477955bd 1813 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1814}
1815
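/*
 * For reference, IWMMXT_OP(maddsq) below expands to:
 *
 *   static inline void gen_op_iwmmxt_maddsq_M0_wRn(int rn)
 *   {
 *       iwmmxt_load_reg(cpu_V1, rn);
 *       gen_helper_iwmmxt_maddsq(cpu_M0, cpu_M0, cpu_V1);
 *   }
 *
 * so each instantiation defines a two-operand op that combines wRn into
 * the M0 working register; the _ENV variants also pass cpu_env.
 */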
1816IWMMXT_OP(maddsq)
1817IWMMXT_OP(madduq)
1818IWMMXT_OP(sadb)
1819IWMMXT_OP(sadw)
1820IWMMXT_OP(mulslw)
1821IWMMXT_OP(mulshw)
1822IWMMXT_OP(mululw)
1823IWMMXT_OP(muluhw)
1824IWMMXT_OP(macsw)
1825IWMMXT_OP(macuw)
1826
1827IWMMXT_OP_ENV_SIZE(unpackl)
1828IWMMXT_OP_ENV_SIZE(unpackh)
1829
1830IWMMXT_OP_ENV1(unpacklub)
1831IWMMXT_OP_ENV1(unpackluw)
1832IWMMXT_OP_ENV1(unpacklul)
1833IWMMXT_OP_ENV1(unpackhub)
1834IWMMXT_OP_ENV1(unpackhuw)
1835IWMMXT_OP_ENV1(unpackhul)
1836IWMMXT_OP_ENV1(unpacklsb)
1837IWMMXT_OP_ENV1(unpacklsw)
1838IWMMXT_OP_ENV1(unpacklsl)
1839IWMMXT_OP_ENV1(unpackhsb)
1840IWMMXT_OP_ENV1(unpackhsw)
1841IWMMXT_OP_ENV1(unpackhsl)
1842
1843IWMMXT_OP_ENV_SIZE(cmpeq)
1844IWMMXT_OP_ENV_SIZE(cmpgtu)
1845IWMMXT_OP_ENV_SIZE(cmpgts)
1846
1847IWMMXT_OP_ENV_SIZE(mins)
1848IWMMXT_OP_ENV_SIZE(minu)
1849IWMMXT_OP_ENV_SIZE(maxs)
1850IWMMXT_OP_ENV_SIZE(maxu)
1851
1852IWMMXT_OP_ENV_SIZE(subn)
1853IWMMXT_OP_ENV_SIZE(addn)
1854IWMMXT_OP_ENV_SIZE(subu)
1855IWMMXT_OP_ENV_SIZE(addu)
1856IWMMXT_OP_ENV_SIZE(subs)
1857IWMMXT_OP_ENV_SIZE(adds)
1858
1859IWMMXT_OP_ENV(avgb0)
1860IWMMXT_OP_ENV(avgb1)
1861IWMMXT_OP_ENV(avgw0)
1862IWMMXT_OP_ENV(avgw1)
e677137d 1863
1864IWMMXT_OP_ENV(packuw)
1865IWMMXT_OP_ENV(packul)
1866IWMMXT_OP_ENV(packuq)
1867IWMMXT_OP_ENV(packsw)
1868IWMMXT_OP_ENV(packsl)
1869IWMMXT_OP_ENV(packsq)
e677137d 1870
1871static void gen_op_iwmmxt_set_mup(void)
1872{
39d5492a 1873 TCGv_i32 tmp;
1874 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1875 tcg_gen_ori_i32(tmp, tmp, 2);
1876 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1877}
1878
1879static void gen_op_iwmmxt_set_cup(void)
1880{
39d5492a 1881 TCGv_i32 tmp;
1882 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1883 tcg_gen_ori_i32(tmp, tmp, 1);
1884 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1885}
1886
1887static void gen_op_iwmmxt_setpsr_nz(void)
1888{
39d5492a 1889 TCGv_i32 tmp = tcg_temp_new_i32();
1890 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1891 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1892}
1893
1894static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1895{
1896 iwmmxt_load_reg(cpu_V1, rn);
86831435 1897 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1898 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1899}
1900
1901static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1902 TCGv_i32 dest)
1903{
1904 int rd;
1905 uint32_t offset;
39d5492a 1906 TCGv_i32 tmp;
1907
1908 rd = (insn >> 16) & 0xf;
da6b5335 1909 tmp = load_reg(s, rd);
1910
1911 offset = (insn & 0xff) << ((insn >> 7) & 2);
1912 if (insn & (1 << 24)) {
1913 /* Pre indexed */
1914 if (insn & (1 << 23))
da6b5335 1915 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1916 else
1917 tcg_gen_addi_i32(tmp, tmp, -offset);
1918 tcg_gen_mov_i32(dest, tmp);
18c9b560 1919 if (insn & (1 << 21))
1920 store_reg(s, rd, tmp);
1921 else
7d1b0095 1922 tcg_temp_free_i32(tmp);
1923 } else if (insn & (1 << 21)) {
1924 /* Post indexed */
da6b5335 1925 tcg_gen_mov_i32(dest, tmp);
18c9b560 1926 if (insn & (1 << 23))
da6b5335 1927 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1928 else
1929 tcg_gen_addi_i32(tmp, tmp, -offset);
1930 store_reg(s, rd, tmp);
1931 } else if (!(insn & (1 << 23)))
1932 return 1;
1933 return 0;
1934}
1935
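/*
 * Worked example for gen_iwmmxt_address() above: with P=1 (bit 24),
 * U=1 (bit 23) and W=0 (bit 21) it generates dest = Rn + offset and
 * leaves Rn unchanged; W=1 additionally writes the updated address
 * back to Rn, and P=0/W=1 selects the post-indexed form where dest
 * takes the original Rn and Rn is updated afterwards.
 */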
39d5492a 1936static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1937{
1938 int rd = (insn >> 0) & 0xf;
39d5492a 1939 TCGv_i32 tmp;
18c9b560 1940
1941 if (insn & (1 << 8)) {
1942 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1943 return 1;
1944 } else {
1945 tmp = iwmmxt_load_creg(rd);
1946 }
1947 } else {
7d1b0095 1948 tmp = tcg_temp_new_i32();
da6b5335 1949 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1950 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1951 }
1952 tcg_gen_andi_i32(tmp, tmp, mask);
1953 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1954 tcg_temp_free_i32(tmp);
1955 return 0;
1956}
1957
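/*
 * gen_iwmmxt_shift() above fetches the shift amount either from a wCGR
 * control register (bit 8 set) or from the low bits of wRd, masked to
 * the caller-supplied width: the WSRL/WSRA/WSLL decoders below pass
 * 0xff, while WROR uses 0xf/0x1f/0x3f depending on the element size.
 */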
a1c7273b 1958/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1959 (i.e. an undefined instruction). */
7dcc1f89 1960static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1961{
1962 int rd, wrd;
1963 int rdhi, rdlo, rd0, rd1, i;
1964 TCGv_i32 addr;
1965 TCGv_i32 tmp, tmp2, tmp3;
1966
1967 if ((insn & 0x0e000e00) == 0x0c000000) {
1968 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1969 wrd = insn & 0xf;
1970 rdlo = (insn >> 12) & 0xf;
1971 rdhi = (insn >> 16) & 0xf;
d00584b7 1972 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1973 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1974 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1975 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1976 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
d00584b7 1977 } else { /* TMCRR */
1978 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1979 iwmmxt_store_reg(cpu_V0, wrd);
1980 gen_op_iwmmxt_set_mup();
1981 }
1982 return 0;
1983 }
1984
1985 wrd = (insn >> 12) & 0xf;
7d1b0095 1986 addr = tcg_temp_new_i32();
da6b5335 1987 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1988 tcg_temp_free_i32(addr);
18c9b560 1989 return 1;
da6b5335 1990 }
18c9b560 1991 if (insn & ARM_CP_RW_BIT) {
d00584b7 1992 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1993 tmp = tcg_temp_new_i32();
12dcc321 1994 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1995 iwmmxt_store_creg(wrd, tmp);
18c9b560 1996 } else {
1997 i = 1;
1998 if (insn & (1 << 8)) {
d00584b7 1999 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 2000 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d 2001 i = 0;
d00584b7 2002 } else { /* WLDRW wRd */
29531141 2003 tmp = tcg_temp_new_i32();
12dcc321 2004 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2005 }
2006 } else {
29531141 2007 tmp = tcg_temp_new_i32();
d00584b7 2008 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 2009 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
d00584b7 2010 } else { /* WLDRB */
12dcc321 2011 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2012 }
2013 }
2014 if (i) {
2015 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 2016 tcg_temp_free_i32(tmp);
e677137d 2017 }
2018 gen_op_iwmmxt_movq_wRn_M0(wrd);
2019 }
2020 } else {
d00584b7 2021 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 2022 tmp = iwmmxt_load_creg(wrd);
12dcc321 2023 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
2024 } else {
2025 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2026 tmp = tcg_temp_new_i32();
e677137d 2027 if (insn & (1 << 8)) {
d00584b7 2028 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 2029 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
d00584b7 2030 } else { /* WSTRW wRd */
ecc7b3aa 2031 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 2032 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
2033 }
2034 } else {
d00584b7 2035 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 2036 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 2037 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
d00584b7 2038 } else { /* WSTRB */
ecc7b3aa 2039 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 2040 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
2041 }
2042 }
18c9b560 2043 }
29531141 2044 tcg_temp_free_i32(tmp);
18c9b560 2045 }
7d1b0095 2046 tcg_temp_free_i32(addr);
2047 return 0;
2048 }
2049
2050 if ((insn & 0x0f000000) != 0x0e000000)
2051 return 1;
2052
2053 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
d00584b7 2054 case 0x000: /* WOR */
2055 wrd = (insn >> 12) & 0xf;
2056 rd0 = (insn >> 0) & 0xf;
2057 rd1 = (insn >> 16) & 0xf;
2058 gen_op_iwmmxt_movq_M0_wRn(rd0);
2059 gen_op_iwmmxt_orq_M0_wRn(rd1);
2060 gen_op_iwmmxt_setpsr_nz();
2061 gen_op_iwmmxt_movq_wRn_M0(wrd);
2062 gen_op_iwmmxt_set_mup();
2063 gen_op_iwmmxt_set_cup();
2064 break;
d00584b7 2065 case 0x011: /* TMCR */
2066 if (insn & 0xf)
2067 return 1;
2068 rd = (insn >> 12) & 0xf;
2069 wrd = (insn >> 16) & 0xf;
2070 switch (wrd) {
2071 case ARM_IWMMXT_wCID:
2072 case ARM_IWMMXT_wCASF:
2073 break;
2074 case ARM_IWMMXT_wCon:
2075 gen_op_iwmmxt_set_cup();
2076 /* Fall through. */
2077 case ARM_IWMMXT_wCSSF:
2078 tmp = iwmmxt_load_creg(wrd);
2079 tmp2 = load_reg(s, rd);
f669df27 2080 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 2081 tcg_temp_free_i32(tmp2);
da6b5335 2082 iwmmxt_store_creg(wrd, tmp);
2083 break;
2084 case ARM_IWMMXT_wCGR0:
2085 case ARM_IWMMXT_wCGR1:
2086 case ARM_IWMMXT_wCGR2:
2087 case ARM_IWMMXT_wCGR3:
2088 gen_op_iwmmxt_set_cup();
2089 tmp = load_reg(s, rd);
2090 iwmmxt_store_creg(wrd, tmp);
2091 break;
2092 default:
2093 return 1;
2094 }
2095 break;
d00584b7 2096 case 0x100: /* WXOR */
2097 wrd = (insn >> 12) & 0xf;
2098 rd0 = (insn >> 0) & 0xf;
2099 rd1 = (insn >> 16) & 0xf;
2100 gen_op_iwmmxt_movq_M0_wRn(rd0);
2101 gen_op_iwmmxt_xorq_M0_wRn(rd1);
2102 gen_op_iwmmxt_setpsr_nz();
2103 gen_op_iwmmxt_movq_wRn_M0(wrd);
2104 gen_op_iwmmxt_set_mup();
2105 gen_op_iwmmxt_set_cup();
2106 break;
d00584b7 2107 case 0x111: /* TMRC */
2108 if (insn & 0xf)
2109 return 1;
2110 rd = (insn >> 12) & 0xf;
2111 wrd = (insn >> 16) & 0xf;
2112 tmp = iwmmxt_load_creg(wrd);
2113 store_reg(s, rd, tmp);
18c9b560 2114 break;
d00584b7 2115 case 0x300: /* WANDN */
2116 wrd = (insn >> 12) & 0xf;
2117 rd0 = (insn >> 0) & 0xf;
2118 rd1 = (insn >> 16) & 0xf;
2119 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 2120 tcg_gen_neg_i64(cpu_M0, cpu_M0);
2121 gen_op_iwmmxt_andq_M0_wRn(rd1);
2122 gen_op_iwmmxt_setpsr_nz();
2123 gen_op_iwmmxt_movq_wRn_M0(wrd);
2124 gen_op_iwmmxt_set_mup();
2125 gen_op_iwmmxt_set_cup();
2126 break;
d00584b7 2127 case 0x200: /* WAND */
2128 wrd = (insn >> 12) & 0xf;
2129 rd0 = (insn >> 0) & 0xf;
2130 rd1 = (insn >> 16) & 0xf;
2131 gen_op_iwmmxt_movq_M0_wRn(rd0);
2132 gen_op_iwmmxt_andq_M0_wRn(rd1);
2133 gen_op_iwmmxt_setpsr_nz();
2134 gen_op_iwmmxt_movq_wRn_M0(wrd);
2135 gen_op_iwmmxt_set_mup();
2136 gen_op_iwmmxt_set_cup();
2137 break;
d00584b7 2138 case 0x810: case 0xa10: /* WMADD */
2139 wrd = (insn >> 12) & 0xf;
2140 rd0 = (insn >> 0) & 0xf;
2141 rd1 = (insn >> 16) & 0xf;
2142 gen_op_iwmmxt_movq_M0_wRn(rd0);
2143 if (insn & (1 << 21))
2144 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2145 else
2146 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 break;
d00584b7 2150 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2151 wrd = (insn >> 12) & 0xf;
2152 rd0 = (insn >> 16) & 0xf;
2153 rd1 = (insn >> 0) & 0xf;
2154 gen_op_iwmmxt_movq_M0_wRn(rd0);
2155 switch ((insn >> 22) & 3) {
2156 case 0:
2157 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2158 break;
2159 case 1:
2160 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2161 break;
2162 case 2:
2163 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2164 break;
2165 case 3:
2166 return 1;
2167 }
2168 gen_op_iwmmxt_movq_wRn_M0(wrd);
2169 gen_op_iwmmxt_set_mup();
2170 gen_op_iwmmxt_set_cup();
2171 break;
d00584b7 2172 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2173 wrd = (insn >> 12) & 0xf;
2174 rd0 = (insn >> 16) & 0xf;
2175 rd1 = (insn >> 0) & 0xf;
2176 gen_op_iwmmxt_movq_M0_wRn(rd0);
2177 switch ((insn >> 22) & 3) {
2178 case 0:
2179 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2180 break;
2181 case 1:
2182 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2183 break;
2184 case 2:
2185 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2186 break;
2187 case 3:
2188 return 1;
2189 }
2190 gen_op_iwmmxt_movq_wRn_M0(wrd);
2191 gen_op_iwmmxt_set_mup();
2192 gen_op_iwmmxt_set_cup();
2193 break;
d00584b7 2194 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2195 wrd = (insn >> 12) & 0xf;
2196 rd0 = (insn >> 16) & 0xf;
2197 rd1 = (insn >> 0) & 0xf;
2198 gen_op_iwmmxt_movq_M0_wRn(rd0);
2199 if (insn & (1 << 22))
2200 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2201 else
2202 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2203 if (!(insn & (1 << 20)))
2204 gen_op_iwmmxt_addl_M0_wRn(wrd);
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 break;
d00584b7 2208 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2209 wrd = (insn >> 12) & 0xf;
2210 rd0 = (insn >> 16) & 0xf;
2211 rd1 = (insn >> 0) & 0xf;
2212 gen_op_iwmmxt_movq_M0_wRn(rd0);
2213 if (insn & (1 << 21)) {
2214 if (insn & (1 << 20))
2215 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2216 else
2217 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2218 } else {
2219 if (insn & (1 << 20))
2220 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2221 else
2222 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2223 }
2224 gen_op_iwmmxt_movq_wRn_M0(wrd);
2225 gen_op_iwmmxt_set_mup();
2226 break;
d00584b7 2227 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2228 wrd = (insn >> 12) & 0xf;
2229 rd0 = (insn >> 16) & 0xf;
2230 rd1 = (insn >> 0) & 0xf;
2231 gen_op_iwmmxt_movq_M0_wRn(rd0);
2232 if (insn & (1 << 21))
2233 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2234 else
2235 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2236 if (!(insn & (1 << 20))) {
2237 iwmmxt_load_reg(cpu_V1, wrd);
2238 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
2239 }
2240 gen_op_iwmmxt_movq_wRn_M0(wrd);
2241 gen_op_iwmmxt_set_mup();
2242 break;
d00584b7 2243 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2244 wrd = (insn >> 12) & 0xf;
2245 rd0 = (insn >> 16) & 0xf;
2246 rd1 = (insn >> 0) & 0xf;
2247 gen_op_iwmmxt_movq_M0_wRn(rd0);
2248 switch ((insn >> 22) & 3) {
2249 case 0:
2250 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2251 break;
2252 case 1:
2253 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2254 break;
2255 case 2:
2256 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2257 break;
2258 case 3:
2259 return 1;
2260 }
2261 gen_op_iwmmxt_movq_wRn_M0(wrd);
2262 gen_op_iwmmxt_set_mup();
2263 gen_op_iwmmxt_set_cup();
2264 break;
d00584b7 2265 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2266 wrd = (insn >> 12) & 0xf;
2267 rd0 = (insn >> 16) & 0xf;
2268 rd1 = (insn >> 0) & 0xf;
2269 gen_op_iwmmxt_movq_M0_wRn(rd0);
2270 if (insn & (1 << 22)) {
2271 if (insn & (1 << 20))
2272 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2273 else
2274 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2275 } else {
2276 if (insn & (1 << 20))
2277 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2278 else
2279 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2280 }
2281 gen_op_iwmmxt_movq_wRn_M0(wrd);
2282 gen_op_iwmmxt_set_mup();
2283 gen_op_iwmmxt_set_cup();
2284 break;
d00584b7 2285 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2286 wrd = (insn >> 12) & 0xf;
2287 rd0 = (insn >> 16) & 0xf;
2288 rd1 = (insn >> 0) & 0xf;
2289 gen_op_iwmmxt_movq_M0_wRn(rd0);
2290 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2291 tcg_gen_andi_i32(tmp, tmp, 7);
2292 iwmmxt_load_reg(cpu_V1, rd1);
2293 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2294 tcg_temp_free_i32(tmp);
2295 gen_op_iwmmxt_movq_wRn_M0(wrd);
2296 gen_op_iwmmxt_set_mup();
2297 break;
d00584b7 2298 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2299 if (((insn >> 6) & 3) == 3)
2300 return 1;
2301 rd = (insn >> 12) & 0xf;
2302 wrd = (insn >> 16) & 0xf;
da6b5335 2303 tmp = load_reg(s, rd);
2304 gen_op_iwmmxt_movq_M0_wRn(wrd);
2305 switch ((insn >> 6) & 3) {
2306 case 0:
2307 tmp2 = tcg_const_i32(0xff);
2308 tmp3 = tcg_const_i32((insn & 7) << 3);
2309 break;
2310 case 1:
2311 tmp2 = tcg_const_i32(0xffff);
2312 tmp3 = tcg_const_i32((insn & 3) << 4);
2313 break;
2314 case 2:
2315 tmp2 = tcg_const_i32(0xffffffff);
2316 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2317 break;
da6b5335 2318 default:
2319 tmp2 = NULL;
2320 tmp3 = NULL;
18c9b560 2321 }
da6b5335 2322 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2323 tcg_temp_free_i32(tmp3);
2324 tcg_temp_free_i32(tmp2);
7d1b0095 2325 tcg_temp_free_i32(tmp);
2326 gen_op_iwmmxt_movq_wRn_M0(wrd);
2327 gen_op_iwmmxt_set_mup();
2328 break;
d00584b7 2329 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2330 rd = (insn >> 12) & 0xf;
2331 wrd = (insn >> 16) & 0xf;
da6b5335 2332 if (rd == 15 || ((insn >> 22) & 3) == 3)
2333 return 1;
2334 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2335 tmp = tcg_temp_new_i32();
2336 switch ((insn >> 22) & 3) {
2337 case 0:
da6b5335 2338 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2339 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2340 if (insn & 8) {
2341 tcg_gen_ext8s_i32(tmp, tmp);
2342 } else {
2343 tcg_gen_andi_i32(tmp, tmp, 0xff);
2344 }
2345 break;
2346 case 1:
da6b5335 2347 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2348 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2349 if (insn & 8) {
2350 tcg_gen_ext16s_i32(tmp, tmp);
2351 } else {
2352 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2353 }
2354 break;
2355 case 2:
da6b5335 2356 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2357 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2358 break;
18c9b560 2359 }
da6b5335 2360 store_reg(s, rd, tmp);
18c9b560 2361 break;
d00584b7 2362 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2363 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2364 return 1;
da6b5335 2365 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2366 switch ((insn >> 22) & 3) {
2367 case 0:
da6b5335 2368 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2369 break;
2370 case 1:
da6b5335 2371 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2372 break;
2373 case 2:
da6b5335 2374 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2375 break;
18c9b560 2376 }
2377 tcg_gen_shli_i32(tmp, tmp, 28);
2378 gen_set_nzcv(tmp);
7d1b0095 2379 tcg_temp_free_i32(tmp);
18c9b560 2380 break;
d00584b7 2381 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2382 if (((insn >> 6) & 3) == 3)
2383 return 1;
2384 rd = (insn >> 12) & 0xf;
2385 wrd = (insn >> 16) & 0xf;
da6b5335 2386 tmp = load_reg(s, rd);
2387 switch ((insn >> 6) & 3) {
2388 case 0:
da6b5335 2389 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2390 break;
2391 case 1:
da6b5335 2392 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2393 break;
2394 case 2:
da6b5335 2395 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2396 break;
18c9b560 2397 }
7d1b0095 2398 tcg_temp_free_i32(tmp);
2399 gen_op_iwmmxt_movq_wRn_M0(wrd);
2400 gen_op_iwmmxt_set_mup();
2401 break;
d00584b7 2402 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2403 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2404 return 1;
da6b5335 2405 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2406 tmp2 = tcg_temp_new_i32();
da6b5335 2407 tcg_gen_mov_i32(tmp2, tmp);
2408 switch ((insn >> 22) & 3) {
2409 case 0:
2410 for (i = 0; i < 7; i ++) {
2411 tcg_gen_shli_i32(tmp2, tmp2, 4);
2412 tcg_gen_and_i32(tmp, tmp, tmp2);
2413 }
2414 break;
2415 case 1:
2416 for (i = 0; i < 3; i ++) {
2417 tcg_gen_shli_i32(tmp2, tmp2, 8);
2418 tcg_gen_and_i32(tmp, tmp, tmp2);
2419 }
2420 break;
2421 case 2:
2422 tcg_gen_shli_i32(tmp2, tmp2, 16);
2423 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2424 break;
18c9b560 2425 }
da6b5335 2426 gen_set_nzcv(tmp);
2427 tcg_temp_free_i32(tmp2);
2428 tcg_temp_free_i32(tmp);
18c9b560 2429 break;
d00584b7 2430 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2431 wrd = (insn >> 12) & 0xf;
2432 rd0 = (insn >> 16) & 0xf;
2433 gen_op_iwmmxt_movq_M0_wRn(rd0);
2434 switch ((insn >> 22) & 3) {
2435 case 0:
e677137d 2436 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2437 break;
2438 case 1:
e677137d 2439 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2440 break;
2441 case 2:
e677137d 2442 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2443 break;
2444 case 3:
2445 return 1;
2446 }
2447 gen_op_iwmmxt_movq_wRn_M0(wrd);
2448 gen_op_iwmmxt_set_mup();
2449 break;
d00584b7 2450 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2451 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2452 return 1;
da6b5335 2453 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2454 tmp2 = tcg_temp_new_i32();
da6b5335 2455 tcg_gen_mov_i32(tmp2, tmp);
2456 switch ((insn >> 22) & 3) {
2457 case 0:
2458 for (i = 0; i < 7; i ++) {
2459 tcg_gen_shli_i32(tmp2, tmp2, 4);
2460 tcg_gen_or_i32(tmp, tmp, tmp2);
2461 }
2462 break;
2463 case 1:
2464 for (i = 0; i < 3; i ++) {
2465 tcg_gen_shli_i32(tmp2, tmp2, 8);
2466 tcg_gen_or_i32(tmp, tmp, tmp2);
2467 }
2468 break;
2469 case 2:
2470 tcg_gen_shli_i32(tmp2, tmp2, 16);
2471 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2472 break;
18c9b560 2473 }
da6b5335 2474 gen_set_nzcv(tmp);
2475 tcg_temp_free_i32(tmp2);
2476 tcg_temp_free_i32(tmp);
18c9b560 2477 break;
d00584b7 2478 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2479 rd = (insn >> 12) & 0xf;
2480 rd0 = (insn >> 16) & 0xf;
da6b5335 2481 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2482 return 1;
2483 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2484 tmp = tcg_temp_new_i32();
2485 switch ((insn >> 22) & 3) {
2486 case 0:
da6b5335 2487 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2488 break;
2489 case 1:
da6b5335 2490 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2491 break;
2492 case 2:
da6b5335 2493 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2494 break;
18c9b560 2495 }
da6b5335 2496 store_reg(s, rd, tmp);
18c9b560 2497 break;
d00584b7 2498 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2499 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2500 wrd = (insn >> 12) & 0xf;
2501 rd0 = (insn >> 16) & 0xf;
2502 rd1 = (insn >> 0) & 0xf;
2503 gen_op_iwmmxt_movq_M0_wRn(rd0);
2504 switch ((insn >> 22) & 3) {
2505 case 0:
2506 if (insn & (1 << 21))
2507 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2508 else
2509 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2510 break;
2511 case 1:
2512 if (insn & (1 << 21))
2513 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2514 else
2515 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2516 break;
2517 case 2:
2518 if (insn & (1 << 21))
2519 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2520 else
2521 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2522 break;
2523 case 3:
2524 return 1;
2525 }
2526 gen_op_iwmmxt_movq_wRn_M0(wrd);
2527 gen_op_iwmmxt_set_mup();
2528 gen_op_iwmmxt_set_cup();
2529 break;
d00584b7 2530 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2531 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2532 wrd = (insn >> 12) & 0xf;
2533 rd0 = (insn >> 16) & 0xf;
2534 gen_op_iwmmxt_movq_M0_wRn(rd0);
2535 switch ((insn >> 22) & 3) {
2536 case 0:
2537 if (insn & (1 << 21))
2538 gen_op_iwmmxt_unpacklsb_M0();
2539 else
2540 gen_op_iwmmxt_unpacklub_M0();
2541 break;
2542 case 1:
2543 if (insn & (1 << 21))
2544 gen_op_iwmmxt_unpacklsw_M0();
2545 else
2546 gen_op_iwmmxt_unpackluw_M0();
2547 break;
2548 case 2:
2549 if (insn & (1 << 21))
2550 gen_op_iwmmxt_unpacklsl_M0();
2551 else
2552 gen_op_iwmmxt_unpacklul_M0();
2553 break;
2554 case 3:
2555 return 1;
2556 }
2557 gen_op_iwmmxt_movq_wRn_M0(wrd);
2558 gen_op_iwmmxt_set_mup();
2559 gen_op_iwmmxt_set_cup();
2560 break;
d00584b7 2561 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2562 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2563 wrd = (insn >> 12) & 0xf;
2564 rd0 = (insn >> 16) & 0xf;
2565 gen_op_iwmmxt_movq_M0_wRn(rd0);
2566 switch ((insn >> 22) & 3) {
2567 case 0:
2568 if (insn & (1 << 21))
2569 gen_op_iwmmxt_unpackhsb_M0();
2570 else
2571 gen_op_iwmmxt_unpackhub_M0();
2572 break;
2573 case 1:
2574 if (insn & (1 << 21))
2575 gen_op_iwmmxt_unpackhsw_M0();
2576 else
2577 gen_op_iwmmxt_unpackhuw_M0();
2578 break;
2579 case 2:
2580 if (insn & (1 << 21))
2581 gen_op_iwmmxt_unpackhsl_M0();
2582 else
2583 gen_op_iwmmxt_unpackhul_M0();
2584 break;
2585 case 3:
2586 return 1;
2587 }
2588 gen_op_iwmmxt_movq_wRn_M0(wrd);
2589 gen_op_iwmmxt_set_mup();
2590 gen_op_iwmmxt_set_cup();
2591 break;
d00584b7 2592 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
18c9b560 2593 case 0x214: case 0x614: case 0xa14: case 0xe14:
2594 if (((insn >> 22) & 3) == 0)
2595 return 1;
2596 wrd = (insn >> 12) & 0xf;
2597 rd0 = (insn >> 16) & 0xf;
2598 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2599 tmp = tcg_temp_new_i32();
da6b5335 2600 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2601 tcg_temp_free_i32(tmp);
18c9b560 2602 return 1;
da6b5335 2603 }
18c9b560 2604 switch ((insn >> 22) & 3) {
18c9b560 2605 case 1:
477955bd 2606 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2607 break;
2608 case 2:
477955bd 2609 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2610 break;
2611 case 3:
477955bd 2612 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2613 break;
2614 }
7d1b0095 2615 tcg_temp_free_i32(tmp);
2616 gen_op_iwmmxt_movq_wRn_M0(wrd);
2617 gen_op_iwmmxt_set_mup();
2618 gen_op_iwmmxt_set_cup();
2619 break;
d00584b7 2620 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
18c9b560 2621 case 0x014: case 0x414: case 0x814: case 0xc14:
2622 if (((insn >> 22) & 3) == 0)
2623 return 1;
2624 wrd = (insn >> 12) & 0xf;
2625 rd0 = (insn >> 16) & 0xf;
2626 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2627 tmp = tcg_temp_new_i32();
da6b5335 2628 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2629 tcg_temp_free_i32(tmp);
18c9b560 2630 return 1;
da6b5335 2631 }
18c9b560 2632 switch ((insn >> 22) & 3) {
18c9b560 2633 case 1:
477955bd 2634 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2635 break;
2636 case 2:
477955bd 2637 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2638 break;
2639 case 3:
477955bd 2640 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2641 break;
2642 }
7d1b0095 2643 tcg_temp_free_i32(tmp);
2644 gen_op_iwmmxt_movq_wRn_M0(wrd);
2645 gen_op_iwmmxt_set_mup();
2646 gen_op_iwmmxt_set_cup();
2647 break;
d00584b7 2648 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
18c9b560 2649 case 0x114: case 0x514: case 0x914: case 0xd14:
2650 if (((insn >> 22) & 3) == 0)
2651 return 1;
2652 wrd = (insn >> 12) & 0xf;
2653 rd0 = (insn >> 16) & 0xf;
2654 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2655 tmp = tcg_temp_new_i32();
da6b5335 2656 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2657 tcg_temp_free_i32(tmp);
18c9b560 2658 return 1;
da6b5335 2659 }
18c9b560 2660 switch ((insn >> 22) & 3) {
18c9b560 2661 case 1:
477955bd 2662 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2663 break;
2664 case 2:
477955bd 2665 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2666 break;
2667 case 3:
477955bd 2668 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2669 break;
2670 }
7d1b0095 2671 tcg_temp_free_i32(tmp);
2672 gen_op_iwmmxt_movq_wRn_M0(wrd);
2673 gen_op_iwmmxt_set_mup();
2674 gen_op_iwmmxt_set_cup();
2675 break;
d00584b7 2676 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
18c9b560 2677 case 0x314: case 0x714: case 0xb14: case 0xf14:
2678 if (((insn >> 22) & 3) == 0)
2679 return 1;
2680 wrd = (insn >> 12) & 0xf;
2681 rd0 = (insn >> 16) & 0xf;
2682 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2683 tmp = tcg_temp_new_i32();
18c9b560 2684 switch ((insn >> 22) & 3) {
18c9b560 2685 case 1:
da6b5335 2686 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2687 tcg_temp_free_i32(tmp);
18c9b560 2688 return 1;
da6b5335 2689 }
477955bd 2690 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2691 break;
2692 case 2:
da6b5335 2693 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2694 tcg_temp_free_i32(tmp);
18c9b560 2695 return 1;
da6b5335 2696 }
477955bd 2697 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2698 break;
2699 case 3:
da6b5335 2700 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2701 tcg_temp_free_i32(tmp);
18c9b560 2702 return 1;
da6b5335 2703 }
477955bd 2704 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2705 break;
2706 }
7d1b0095 2707 tcg_temp_free_i32(tmp);
2708 gen_op_iwmmxt_movq_wRn_M0(wrd);
2709 gen_op_iwmmxt_set_mup();
2710 gen_op_iwmmxt_set_cup();
2711 break;
d00584b7 2712 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2713 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2714 wrd = (insn >> 12) & 0xf;
2715 rd0 = (insn >> 16) & 0xf;
2716 rd1 = (insn >> 0) & 0xf;
2717 gen_op_iwmmxt_movq_M0_wRn(rd0);
2718 switch ((insn >> 22) & 3) {
2719 case 0:
2720 if (insn & (1 << 21))
2721 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2722 else
2723 gen_op_iwmmxt_minub_M0_wRn(rd1);
2724 break;
2725 case 1:
2726 if (insn & (1 << 21))
2727 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2728 else
2729 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2730 break;
2731 case 2:
2732 if (insn & (1 << 21))
2733 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2734 else
2735 gen_op_iwmmxt_minul_M0_wRn(rd1);
2736 break;
2737 case 3:
2738 return 1;
2739 }
2740 gen_op_iwmmxt_movq_wRn_M0(wrd);
2741 gen_op_iwmmxt_set_mup();
2742 break;
d00584b7 2743 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2744 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2745 wrd = (insn >> 12) & 0xf;
2746 rd0 = (insn >> 16) & 0xf;
2747 rd1 = (insn >> 0) & 0xf;
2748 gen_op_iwmmxt_movq_M0_wRn(rd0);
2749 switch ((insn >> 22) & 3) {
2750 case 0:
2751 if (insn & (1 << 21))
2752 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2753 else
2754 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2755 break;
2756 case 1:
2757 if (insn & (1 << 21))
2758 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2759 else
2760 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2761 break;
2762 case 2:
2763 if (insn & (1 << 21))
2764 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2765 else
2766 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2767 break;
2768 case 3:
2769 return 1;
2770 }
2771 gen_op_iwmmxt_movq_wRn_M0(wrd);
2772 gen_op_iwmmxt_set_mup();
2773 break;
d00584b7 2774 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2775 case 0x402: case 0x502: case 0x602: case 0x702:
2776 wrd = (insn >> 12) & 0xf;
2777 rd0 = (insn >> 16) & 0xf;
2778 rd1 = (insn >> 0) & 0xf;
2779 gen_op_iwmmxt_movq_M0_wRn(rd0);
2780 tmp = tcg_const_i32((insn >> 20) & 3);
2781 iwmmxt_load_reg(cpu_V1, rd1);
2782 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2783 tcg_temp_free_i32(tmp);
2784 gen_op_iwmmxt_movq_wRn_M0(wrd);
2785 gen_op_iwmmxt_set_mup();
2786 break;
d00584b7 2787 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2788 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2789 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2790 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2791 wrd = (insn >> 12) & 0xf;
2792 rd0 = (insn >> 16) & 0xf;
2793 rd1 = (insn >> 0) & 0xf;
2794 gen_op_iwmmxt_movq_M0_wRn(rd0);
2795 switch ((insn >> 20) & 0xf) {
2796 case 0x0:
2797 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2798 break;
2799 case 0x1:
2800 gen_op_iwmmxt_subub_M0_wRn(rd1);
2801 break;
2802 case 0x3:
2803 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2804 break;
2805 case 0x4:
2806 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2807 break;
2808 case 0x5:
2809 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2810 break;
2811 case 0x7:
2812 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2813 break;
2814 case 0x8:
2815 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2816 break;
2817 case 0x9:
2818 gen_op_iwmmxt_subul_M0_wRn(rd1);
2819 break;
2820 case 0xb:
2821 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2822 break;
2823 default:
2824 return 1;
2825 }
2826 gen_op_iwmmxt_movq_wRn_M0(wrd);
2827 gen_op_iwmmxt_set_mup();
2828 gen_op_iwmmxt_set_cup();
2829 break;
d00584b7 2830 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2831 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2832 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2833 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2834 wrd = (insn >> 12) & 0xf;
2835 rd0 = (insn >> 16) & 0xf;
2836 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2837 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2838 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2839 tcg_temp_free_i32(tmp);
2840 gen_op_iwmmxt_movq_wRn_M0(wrd);
2841 gen_op_iwmmxt_set_mup();
2842 gen_op_iwmmxt_set_cup();
2843 break;
d00584b7 2844 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2845 case 0x418: case 0x518: case 0x618: case 0x718:
2846 case 0x818: case 0x918: case 0xa18: case 0xb18:
2847 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2848 wrd = (insn >> 12) & 0xf;
2849 rd0 = (insn >> 16) & 0xf;
2850 rd1 = (insn >> 0) & 0xf;
2851 gen_op_iwmmxt_movq_M0_wRn(rd0);
2852 switch ((insn >> 20) & 0xf) {
2853 case 0x0:
2854 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2855 break;
2856 case 0x1:
2857 gen_op_iwmmxt_addub_M0_wRn(rd1);
2858 break;
2859 case 0x3:
2860 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2861 break;
2862 case 0x4:
2863 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2864 break;
2865 case 0x5:
2866 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2867 break;
2868 case 0x7:
2869 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2870 break;
2871 case 0x8:
2872 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2873 break;
2874 case 0x9:
2875 gen_op_iwmmxt_addul_M0_wRn(rd1);
2876 break;
2877 case 0xb:
2878 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2879 break;
2880 default:
2881 return 1;
2882 }
2883 gen_op_iwmmxt_movq_wRn_M0(wrd);
2884 gen_op_iwmmxt_set_mup();
2885 gen_op_iwmmxt_set_cup();
2886 break;
d00584b7 2887 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2888 case 0x408: case 0x508: case 0x608: case 0x708:
2889 case 0x808: case 0x908: case 0xa08: case 0xb08:
2890 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2891 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2892 return 1;
2893 wrd = (insn >> 12) & 0xf;
2894 rd0 = (insn >> 16) & 0xf;
2895 rd1 = (insn >> 0) & 0xf;
2896 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2897 switch ((insn >> 22) & 3) {
2898 case 1:
2899 if (insn & (1 << 21))
2900 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2901 else
2902 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2903 break;
2904 case 2:
2905 if (insn & (1 << 21))
2906 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2907 else
2908 gen_op_iwmmxt_packul_M0_wRn(rd1);
2909 break;
2910 case 3:
2911 if (insn & (1 << 21))
2912 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2913 else
2914 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2915 break;
2916 }
2917 gen_op_iwmmxt_movq_wRn_M0(wrd);
2918 gen_op_iwmmxt_set_mup();
2919 gen_op_iwmmxt_set_cup();
2920 break;
2921 case 0x201: case 0x203: case 0x205: case 0x207:
2922 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2923 case 0x211: case 0x213: case 0x215: case 0x217:
2924 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2925 wrd = (insn >> 5) & 0xf;
2926 rd0 = (insn >> 12) & 0xf;
2927 rd1 = (insn >> 0) & 0xf;
2928 if (rd0 == 0xf || rd1 == 0xf)
2929 return 1;
2930 gen_op_iwmmxt_movq_M0_wRn(wrd);
2931 tmp = load_reg(s, rd0);
2932 tmp2 = load_reg(s, rd1);
18c9b560 2933 switch ((insn >> 16) & 0xf) {
d00584b7 2934 case 0x0: /* TMIA */
da6b5335 2935 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2936 break;
d00584b7 2937 case 0x8: /* TMIAPH */
da6b5335 2938 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2939 break;
d00584b7 2940 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2941 if (insn & (1 << 16))
da6b5335 2942 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2943 if (insn & (1 << 17))
2944 tcg_gen_shri_i32(tmp2, tmp2, 16);
2945 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2946 break;
2947 default:
2948 tcg_temp_free_i32(tmp2);
2949 tcg_temp_free_i32(tmp);
2950 return 1;
2951 }
2952 tcg_temp_free_i32(tmp2);
2953 tcg_temp_free_i32(tmp);
2954 gen_op_iwmmxt_movq_wRn_M0(wrd);
2955 gen_op_iwmmxt_set_mup();
2956 break;
2957 default:
2958 return 1;
2959 }
2960
2961 return 0;
2962}
2963
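/*
 * Illustrative sketch, not part of the original file: the TMRRC/MRA
 * paths above split a 64-bit iwMMXt value into two core registers.
 * gen_split_i64() is a hypothetical name; like the decoder, it clobbers
 * the 64-bit source while extracting the high half.
 */
static inline void gen_split_i64(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 val)
{
    tcg_gen_extrl_i64_i32(lo, val);   /* low 32 bits */
    tcg_gen_shri_i64(val, val, 32);   /* shift the high half down */
    tcg_gen_extrl_i64_i32(hi, val);
}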
a1c7273b 2964/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2965 (i.e. an undefined instruction). */
7dcc1f89 2966static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2967{
2968 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2969 TCGv_i32 tmp, tmp2;
2970
2971 if ((insn & 0x0ff00f10) == 0x0e200010) {
2972 /* Multiply with Internal Accumulate Format */
2973 rd0 = (insn >> 12) & 0xf;
2974 rd1 = insn & 0xf;
2975 acc = (insn >> 5) & 7;
2976
2977 if (acc != 0)
2978 return 1;
2979
2980 tmp = load_reg(s, rd0);
2981 tmp2 = load_reg(s, rd1);
18c9b560 2982 switch ((insn >> 16) & 0xf) {
d00584b7 2983 case 0x0: /* MIA */
3a554c0f 2984 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2985 break;
d00584b7 2986 case 0x8: /* MIAPH */
3a554c0f 2987 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2988 break;
2989 case 0xc: /* MIABB */
2990 case 0xd: /* MIABT */
2991 case 0xe: /* MIATB */
2992 case 0xf: /* MIATT */
18c9b560 2993 if (insn & (1 << 16))
3a554c0f 2994 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2995 if (insn & (1 << 17))
2996 tcg_gen_shri_i32(tmp2, tmp2, 16);
2997 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2998 break;
2999 default:
3000 return 1;
3001 }
3002 tcg_temp_free_i32(tmp2);
3003 tcg_temp_free_i32(tmp);
3004
3005 gen_op_iwmmxt_movq_wRn_M0(acc);
3006 return 0;
3007 }
3008
3009 if ((insn & 0x0fe00ff8) == 0x0c400000) {
3010 /* Internal Accumulator Access Format */
3011 rdhi = (insn >> 16) & 0xf;
3012 rdlo = (insn >> 12) & 0xf;
3013 acc = insn & 7;
3014
3015 if (acc != 0)
3016 return 1;
3017
d00584b7 3018 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 3019 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 3020 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 3021 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 3022 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 3023 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
d00584b7 3024 } else { /* MAR */
3025 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
3026 iwmmxt_store_reg(cpu_V0, acc);
3027 }
3028 return 0;
3029 }
3030
3031 return 1;
3032}
3033
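/*
 * Note on the MRA path above: the XScale internal accumulator is only
 * 40 bits wide, so after splitting it into rdlo:rdhi the high word is
 * masked with (1 << (40 - 32)) - 1 to keep just its top 8 bits.
 */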
3034#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
3035#define VFP_SREG(insn, bigbit, smallbit) \
3036 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
3037#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
d614a513 3038 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
3039 reg = (((insn) >> (bigbit)) & 0x0f) \
3040 | (((insn) >> ((smallbit) - 4)) & 0x10); \
3041 } else { \
3042 if (insn & (1 << (smallbit))) \
3043 return 1; \
3044 reg = ((insn) >> (bigbit)) & 0x0f; \
3045 }} while (0)
3046
3047#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
3048#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
3049#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
3050#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
3051#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
3052#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
3053
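/*
 * Example of the encodings above: VFP_SREG_D() forms a single-precision
 * index as (bits[15:12] << 1) | bit[22], while VFP_DREG_D() on a VFP3
 * core uses bit 22 as the high bit instead: (bit[22] << 4) | bits[15:12].
 * Pre-VFP3 cores only have d0-d15, so a set high bit is treated as UNDEF.
 */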
4373f3ce 3054/* Move between integer and VFP cores. */
39d5492a 3055static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 3056{
39d5492a 3057 TCGv_i32 tmp = tcg_temp_new_i32();
3058 tcg_gen_mov_i32(tmp, cpu_F0s);
3059 return tmp;
3060}
3061
39d5492a 3062static void gen_vfp_msr(TCGv_i32 tmp)
3063{
3064 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 3065 tcg_temp_free_i32(tmp);
3066}
3067
39d5492a 3068static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 3069{
39d5492a 3070 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 3071 tcg_gen_ext16u_i32(var, var);
3072 tcg_gen_shli_i32(tmp, var, 16);
3073 tcg_gen_or_i32(var, var, tmp);
7d1b0095 3074 tcg_temp_free_i32(tmp);
3075}
3076
39d5492a 3077static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 3078{
39d5492a 3079 TCGv_i32 tmp = tcg_temp_new_i32();
3080 tcg_gen_andi_i32(var, var, 0xffff0000);
3081 tcg_gen_shri_i32(tmp, var, 16);
3082 tcg_gen_or_i32(var, var, tmp);
7d1b0095 3083 tcg_temp_free_i32(tmp);
3084}
3085
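/*
 * Example for the helpers above: gen_neon_dup_low16() turns 0x0000abcd
 * into 0xabcdabcd, and gen_neon_dup_high16() replicates the high half
 * instead, mapping 0xabcd1234 to 0xabcdabcd.
 */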
3086static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
3087 uint32_t dp)
3088{
3089 uint32_t cc = extract32(insn, 20, 2);
3090
3091 if (dp) {
3092 TCGv_i64 frn, frm, dest;
3093 TCGv_i64 tmp, zero, zf, nf, vf;
3094
3095 zero = tcg_const_i64(0);
3096
3097 frn = tcg_temp_new_i64();
3098 frm = tcg_temp_new_i64();
3099 dest = tcg_temp_new_i64();
3100
3101 zf = tcg_temp_new_i64();
3102 nf = tcg_temp_new_i64();
3103 vf = tcg_temp_new_i64();
3104
3105 tcg_gen_extu_i32_i64(zf, cpu_ZF);
3106 tcg_gen_ext_i32_i64(nf, cpu_NF);
3107 tcg_gen_ext_i32_i64(vf, cpu_VF);
3108
3109 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3110 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3111 switch (cc) {
3112 case 0: /* eq: Z */
3113 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
3114 frn, frm);
3115 break;
3116 case 1: /* vs: V */
3117 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
3118 frn, frm);
3119 break;
3120 case 2: /* ge: N == V -> N ^ V == 0 */
3121 tmp = tcg_temp_new_i64();
3122 tcg_gen_xor_i64(tmp, vf, nf);
3123 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3124 frn, frm);
3125 tcg_temp_free_i64(tmp);
3126 break;
3127 case 3: /* gt: !Z && N == V */
3128 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
3129 frn, frm);
3130 tmp = tcg_temp_new_i64();
3131 tcg_gen_xor_i64(tmp, vf, nf);
3132 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3133 dest, frm);
3134 tcg_temp_free_i64(tmp);
3135 break;
3136 }
3137 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3138 tcg_temp_free_i64(frn);
3139 tcg_temp_free_i64(frm);
3140 tcg_temp_free_i64(dest);
3141
3142 tcg_temp_free_i64(zf);
3143 tcg_temp_free_i64(nf);
3144 tcg_temp_free_i64(vf);
3145
3146 tcg_temp_free_i64(zero);
3147 } else {
3148 TCGv_i32 frn, frm, dest;
3149 TCGv_i32 tmp, zero;
3150
3151 zero = tcg_const_i32(0);
3152
3153 frn = tcg_temp_new_i32();
3154 frm = tcg_temp_new_i32();
3155 dest = tcg_temp_new_i32();
3156 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3157 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3158 switch (cc) {
3159 case 0: /* eq: Z */
3160 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3161 frn, frm);
3162 break;
3163 case 1: /* vs: V */
3164 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3165 frn, frm);
3166 break;
3167 case 2: /* ge: N == V -> N ^ V == 0 */
3168 tmp = tcg_temp_new_i32();
3169 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3170 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3171 frn, frm);
3172 tcg_temp_free_i32(tmp);
3173 break;
3174 case 3: /* gt: !Z && N == V */
3175 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3176 frn, frm);
3177 tmp = tcg_temp_new_i32();
3178 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3179 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3180 dest, frm);
3181 tcg_temp_free_i32(tmp);
3182 break;
3183 }
3184 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3185 tcg_temp_free_i32(frn);
3186 tcg_temp_free_i32(frm);
3187 tcg_temp_free_i32(dest);
3188
3189 tcg_temp_free_i32(zero);
3190 }
3191
3192 return 0;
3193}
3194
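/*
 * Note on handle_vsel() above: only the eq/vs/ge/gt conditions are
 * encoded; because each movcond falls through to frm when the condition
 * fails, the inverse conditions (ne/vc/lt/le) come for free by swapping
 * the rn and rm source operands.
 */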
3195static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3196 uint32_t rm, uint32_t dp)
3197{
3198 uint32_t vmin = extract32(insn, 6, 1);
3199 TCGv_ptr fpst = get_fpstatus_ptr(0);
3200
3201 if (dp) {
3202 TCGv_i64 frn, frm, dest;
3203
3204 frn = tcg_temp_new_i64();
3205 frm = tcg_temp_new_i64();
3206 dest = tcg_temp_new_i64();
3207
3208 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3209 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3210 if (vmin) {
f71a2ae5 3211 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3212 } else {
f71a2ae5 3213 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
3214 }
3215 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3216 tcg_temp_free_i64(frn);
3217 tcg_temp_free_i64(frm);
3218 tcg_temp_free_i64(dest);
3219 } else {
3220 TCGv_i32 frn, frm, dest;
3221
3222 frn = tcg_temp_new_i32();
3223 frm = tcg_temp_new_i32();
3224 dest = tcg_temp_new_i32();
3225
3226 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3227 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3228 if (vmin) {
f71a2ae5 3229 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3230 } else {
f71a2ae5 3231 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
3232 }
3233 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3234 tcg_temp_free_i32(frn);
3235 tcg_temp_free_i32(frm);
3236 tcg_temp_free_i32(dest);
3237 }
3238
3239 tcg_temp_free_ptr(fpst);
3240 return 0;
3241}
3242
3243static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3244 int rounding)
3245{
3246 TCGv_ptr fpst = get_fpstatus_ptr(0);
3247 TCGv_i32 tcg_rmode;
3248
3249 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
9b049916 3250 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3251
3252 if (dp) {
3253 TCGv_i64 tcg_op;
3254 TCGv_i64 tcg_res;
3255 tcg_op = tcg_temp_new_i64();
3256 tcg_res = tcg_temp_new_i64();
3257 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3258 gen_helper_rintd(tcg_res, tcg_op, fpst);
3259 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3260 tcg_temp_free_i64(tcg_op);
3261 tcg_temp_free_i64(tcg_res);
3262 } else {
3263 TCGv_i32 tcg_op;
3264 TCGv_i32 tcg_res;
3265 tcg_op = tcg_temp_new_i32();
3266 tcg_res = tcg_temp_new_i32();
3267 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3268 gen_helper_rints(tcg_res, tcg_op, fpst);
3269 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3270 tcg_temp_free_i32(tcg_op);
3271 tcg_temp_free_i32(tcg_res);
3272 }
3273
9b049916 3274 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3275 tcg_temp_free_i32(tcg_rmode);
3276
3277 tcg_temp_free_ptr(fpst);
3278 return 0;
3279}
3280
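/*
 * Note on the rounding-mode dance above (also used by handle_vcvt()
 * below): gen_helper_set_rmode() writes the previous mode back into
 * tcg_rmode, so the second call restores whatever mode was active
 * before the instruction executed.
 */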
3281static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3282 int rounding)
3283{
3284 bool is_signed = extract32(insn, 7, 1);
3285 TCGv_ptr fpst = get_fpstatus_ptr(0);
3286 TCGv_i32 tcg_rmode, tcg_shift;
3287
3288 tcg_shift = tcg_const_i32(0);
3289
3290 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
9b049916 3291 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3292
3293 if (dp) {
3294 TCGv_i64 tcg_double, tcg_res;
3295 TCGv_i32 tcg_tmp;
3296 /* Rd is encoded as a single precision register even when the source
3297 * is double precision.
3298 */
3299 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3300 tcg_double = tcg_temp_new_i64();
3301 tcg_res = tcg_temp_new_i64();
3302 tcg_tmp = tcg_temp_new_i32();
3303 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3304 if (is_signed) {
3305 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3306 } else {
3307 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3308 }
ecc7b3aa 3309 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3310 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3311 tcg_temp_free_i32(tcg_tmp);
3312 tcg_temp_free_i64(tcg_res);
3313 tcg_temp_free_i64(tcg_double);
3314 } else {
3315 TCGv_i32 tcg_single, tcg_res;
3316 tcg_single = tcg_temp_new_i32();
3317 tcg_res = tcg_temp_new_i32();
3318 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3319 if (is_signed) {
3320 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3321 } else {
3322 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3323 }
3324 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3325 tcg_temp_free_i32(tcg_res);
3326 tcg_temp_free_i32(tcg_single);
3327 }
3328
9b049916 3329 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3330 tcg_temp_free_i32(tcg_rmode);
3331
3332 tcg_temp_free_i32(tcg_shift);
3333
3334 tcg_temp_free_ptr(fpst);
3335
3336 return 0;
3337}
3338
3339/* Table for converting the most common AArch32 encoding of
3340 * rounding mode to arm_fprounding order (which matches the
3341 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3342 */
3343static const uint8_t fp_decode_rm[] = {
3344 FPROUNDING_TIEAWAY,
3345 FPROUNDING_TIEEVEN,
3346 FPROUNDING_POSINF,
3347 FPROUNDING_NEGINF,
3348};
3349
c0c760af 3350static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn)
3351{
3352 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3353
3354 if (dp) {
3355 VFP_DREG_D(rd, insn);
3356 VFP_DREG_N(rn, insn);
3357 VFP_DREG_M(rm, insn);
3358 } else {
3359 rd = VFP_SREG_D(insn);
3360 rn = VFP_SREG_N(insn);
3361 rm = VFP_SREG_M(insn);
3362 }
3363
c0c760af 3364 if ((insn & 0x0f800e50) == 0x0e000a00 && dc_isar_feature(aa32_vsel, s)) {
04731fb5 3365 return handle_vsel(insn, rd, rn, rm, dp);
3366 } else if ((insn & 0x0fb00e10) == 0x0e800a00 &&
3367 dc_isar_feature(aa32_vminmaxnm, s)) {
40cfacdd 3368 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3369 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40 &&
3370 dc_isar_feature(aa32_vrint, s)) {
3371 /* VRINTA, VRINTN, VRINTP, VRINTM */
3372 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3373 return handle_vrint(insn, rd, rm, dp, rounding);
3374 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40 &&
3375 dc_isar_feature(aa32_vcvt_dr, s)) {
3376 /* VCVTA, VCVTN, VCVTP, VCVTM */
3377 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3378 return handle_vcvt(insn, rd, rm, dp, rounding);
3379 }
3380 return 1;
3381}
3382
a1c7273b 3383/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3384 (i.e. an undefined instruction). */
7dcc1f89 3385static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3386{
3387 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3388 int dp, veclen;
3389 TCGv_i32 addr;
3390 TCGv_i32 tmp;
3391 TCGv_i32 tmp2;
b7bcbe95 3392
d614a513 3393 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3394 return 1;
d614a513 3395 }
40f137e1 3396
3397 /* FIXME: this access check should not take precedence over UNDEF
3398 * for invalid encodings; we will generate incorrect syndrome information
3399 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3400 */
9dbbc748 3401 if (s->fp_excp_el) {
3402 if (arm_dc_feature(s, ARM_FEATURE_M)) {
3403 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
3404 s->fp_excp_el);
3405 } else {
3406 gen_exception_insn(s, 4, EXCP_UDEF,
3407 syn_fp_access_trap(1, 0xe, false),
3408 s->fp_excp_el);
3409 }
3410 return 0;
3411 }
3412
5df8bac1 3413 if (!s->vfp_enabled) {
9ee6e8bb 3414 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3415 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3416 return 1;
3417 rn = (insn >> 16) & 0xf;
3418 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3419 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3420 return 1;
a50c0f51 3421 }
40f137e1 3422 }
3423
3424 if (extract32(insn, 28, 4) == 0xf) {
3425 /*
3426 * Encodings with T=1 (Thumb) or unconditional (ARM):
3427 * only used for the "miscellaneous VFP features" added in v8A
3428 * and v7M (and gated on the MVFR2.FPMisc field).
6a57f3eb 3429 */
c0c760af 3430 return disas_vfp_misc_insn(s, insn);
3431 }
3432
3433 dp = ((insn & 0xf00) == 0xb00);
3434 switch ((insn >> 24) & 0xf) {
3435 case 0xe:
3436 if (insn & (1 << 4)) {
3437 /* single register transfer */
3438 rd = (insn >> 12) & 0xf;
3439 if (dp) {
3440 int size;
3441 int pass;
3442
3443 VFP_DREG_N(rn, insn);
3444 if (insn & 0xf)
b7bcbe95 3445 return 1;
9ee6e8bb 3446 if (insn & 0x00c00060
d614a513 3447 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3448 return 1;
d614a513 3449 }
3450
3451 pass = (insn >> 21) & 1;
3452 if (insn & (1 << 22)) {
3453 size = 0;
3454 offset = ((insn >> 5) & 3) * 8;
3455 } else if (insn & (1 << 5)) {
3456 size = 1;
3457 offset = (insn & (1 << 6)) ? 16 : 0;
3458 } else {
3459 size = 2;
3460 offset = 0;
3461 }
18c9b560 3462 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3463 /* vfp->arm */
ad69471c 3464 tmp = neon_load_reg(rn, pass);
3465 switch (size) {
3466 case 0:
9ee6e8bb 3467 if (offset)
ad69471c 3468 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3469 if (insn & (1 << 23))
ad69471c 3470 gen_uxtb(tmp);
9ee6e8bb 3471 else
ad69471c 3472 gen_sxtb(tmp);
3473 break;
3474 case 1:
3475 if (insn & (1 << 23)) {
3476 if (offset) {
ad69471c 3477 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3478 } else {
ad69471c 3479 gen_uxth(tmp);
3480 }
3481 } else {
3482 if (offset) {
ad69471c 3483 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3484 } else {
ad69471c 3485 gen_sxth(tmp);
3486 }
3487 }
3488 break;
3489 case 2:
3490 break;
3491 }
ad69471c 3492 store_reg(s, rd, tmp);
3493 } else {
3494 /* arm->vfp */
ad69471c 3495 tmp = load_reg(s, rd);
3496 if (insn & (1 << 23)) {
3497 /* VDUP */
3498 int vec_size = pass ? 16 : 8;
3499 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0),
3500 vec_size, vec_size, tmp);
3501 tcg_temp_free_i32(tmp);
3502 } else {
3503 /* VMOV */
3504 switch (size) {
3505 case 0:
ad69471c 3506 tmp2 = neon_load_reg(rn, pass);
d593c48e 3507 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3508 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3509 break;
3510 case 1:
ad69471c 3511 tmp2 = neon_load_reg(rn, pass);
d593c48e 3512 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3513 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3514 break;
3515 case 2:
9ee6e8bb
PB
3516 break;
3517 }
ad69471c 3518 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3519 }
b7bcbe95 3520 }
9ee6e8bb 3521 } else { /* !dp */
ef9aae25
PM
3522 bool is_sysreg;
3523
9ee6e8bb
PB
3524 if ((insn & 0x6f) != 0x00)
3525 return 1;
3526 rn = VFP_SREG_N(insn);
ef9aae25
PM
3527
3528 is_sysreg = extract32(insn, 21, 1);
3529
3530 if (arm_dc_feature(s, ARM_FEATURE_M)) {
3531 /*
3532 * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
3533 * Writes to R15 are UNPREDICTABLE; we choose to UNDEF.
3534 */
3535 if (is_sysreg && (rd == 15 || (rn >> 1) != ARM_VFP_FPSCR)) {
3536 return 1;
3537 }
3538 }
3539
18c9b560 3540 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3541 /* vfp->arm */
ef9aae25 3542 if (is_sysreg) {
b7bcbe95 3543 /* system register */
40f137e1 3544 rn >>= 1;
9ee6e8bb 3545
b7bcbe95 3546 switch (rn) {
40f137e1 3547 case ARM_VFP_FPSID:
4373f3ce 3548 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
3549 VFP3 restricts all id registers to privileged
3550 accesses. */
3551 if (IS_USER(s)
d614a513 3552 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3553 return 1;
d614a513 3554 }
4373f3ce 3555 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3556 break;
40f137e1 3557 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3558 if (IS_USER(s))
3559 return 1;
4373f3ce 3560 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3561 break;
40f137e1
PB
3562 case ARM_VFP_FPINST:
3563 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3564 /* Not present in VFP3. */
3565 if (IS_USER(s)
d614a513 3566 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3567 return 1;
d614a513 3568 }
4373f3ce 3569 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3570 break;
40f137e1 3571 case ARM_VFP_FPSCR:
601d70b9 3572 if (rd == 15) {
4373f3ce
PB
3573 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3574 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3575 } else {
7d1b0095 3576 tmp = tcg_temp_new_i32();
4373f3ce
PB
3577 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3578 }
b7bcbe95 3579 break;
a50c0f51 3580 case ARM_VFP_MVFR2:
d614a513 3581 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3582 return 1;
3583 }
3584 /* fall through */
9ee6e8bb
PB
3585 case ARM_VFP_MVFR0:
3586 case ARM_VFP_MVFR1:
3587 if (IS_USER(s)
d614a513 3588 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3589 return 1;
d614a513 3590 }
4373f3ce 3591 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3592 break;
b7bcbe95
FB
3593 default:
3594 return 1;
3595 }
3596 } else {
3597 gen_mov_F0_vreg(0, rn);
4373f3ce 3598 tmp = gen_vfp_mrs();
b7bcbe95
FB
3599 }
3600 if (rd == 15) {
b5ff1b31 3601 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3602 gen_set_nzcv(tmp);
7d1b0095 3603 tcg_temp_free_i32(tmp);
4373f3ce
PB
3604 } else {
3605 store_reg(s, rd, tmp);
3606 }
b7bcbe95
FB
3607 } else {
3608 /* arm->vfp */
ef9aae25 3609 if (is_sysreg) {
40f137e1 3610 rn >>= 1;
b7bcbe95
FB
3611 /* system register */
3612 switch (rn) {
40f137e1 3613 case ARM_VFP_FPSID:
9ee6e8bb
PB
3614 case ARM_VFP_MVFR0:
3615 case ARM_VFP_MVFR1:
b7bcbe95
FB
3616 /* Writes are ignored. */
3617 break;
40f137e1 3618 case ARM_VFP_FPSCR:
e4c1cfa5 3619 tmp = load_reg(s, rd);
4373f3ce 3620 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3621 tcg_temp_free_i32(tmp);
b5ff1b31 3622 gen_lookup_tb(s);
b7bcbe95 3623 break;
40f137e1 3624 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3625 if (IS_USER(s))
3626 return 1;
71b3c3de
JR
3627 /* TODO: VFP subarchitecture support.
3628 * For now, keep only the EN bit. */
e4c1cfa5 3629 tmp = load_reg(s, rd);
71b3c3de 3630 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3631 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3632 gen_lookup_tb(s);
3633 break;
3634 case ARM_VFP_FPINST:
3635 case ARM_VFP_FPINST2:
23adb861
PM
3636 if (IS_USER(s)) {
3637 return 1;
3638 }
e4c1cfa5 3639 tmp = load_reg(s, rd);
4373f3ce 3640 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3641 break;
b7bcbe95
FB
3642 default:
3643 return 1;
3644 }
3645 } else {
e4c1cfa5 3646 tmp = load_reg(s, rd);
4373f3ce 3647 gen_vfp_msr(tmp);
b7bcbe95
FB
3648 gen_mov_vreg_F0(0, rn);
3649 }
3650 }
3651 }
3652 } else {
3653 /* data processing */
e80941bd
RH
3654 bool rd_is_dp = dp;
3655 bool rm_is_dp = dp;
3656 bool no_output = false;
3657
b7bcbe95
FB
3658 /* The opcode is in bits 23, 21, 20 and 6. */
3659 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
e80941bd 3660 rn = VFP_SREG_N(insn);
b7bcbe95 3661
e80941bd
RH
3662 if (op == 15) {
3663 /* rn is opcode, encoded as per VFP_SREG_N. */
3664 switch (rn) {
3665 case 0x00: /* vmov */
3666 case 0x01: /* vabs */
3667 case 0x02: /* vneg */
3668 case 0x03: /* vsqrt */
3669 break;
3670
3671 case 0x04: /* vcvtb.f64.f16, vcvtb.f32.f16 */
3672 case 0x05: /* vcvtt.f64.f16, vcvtt.f32.f16 */
3673 /*
3674 * VCVTB, VCVTT: only present with the halfprec extension
3675 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3676 * (we choose to UNDEF)
04595bf6 3677 */
602f6e42
PM
3678 if (dp) {
3679 if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
3680 return 1;
3681 }
3682 } else {
3683 if (!dc_isar_feature(aa32_fp16_spconv, s)) {
3684 return 1;
3685 }
e80941bd
RH
3686 }
3687 rm_is_dp = false;
3688 break;
3689 case 0x06: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3690 case 0x07: /* vcvtt.f16.f32, vcvtt.f16.f64 */
602f6e42
PM
3691 if (dp) {
3692 if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
3693 return 1;
3694 }
3695 } else {
3696 if (!dc_isar_feature(aa32_fp16_spconv, s)) {
3697 return 1;
3698 }
e80941bd
RH
3699 }
3700 rd_is_dp = false;
3701 break;
3702
3703 case 0x08: case 0x0a: /* vcmp, vcmpz */
3704 case 0x09: case 0x0b: /* vcmpe, vcmpez */
3705 no_output = true;
3706 break;
3707
3708 case 0x0c: /* vrintr */
3709 case 0x0d: /* vrintz */
3710 case 0x0e: /* vrintx */
3711 break;
3712
3713 case 0x0f: /* vcvt double<->single */
3714 rd_is_dp = !dp;
3715 break;
3716
3717 case 0x10: /* vcvt.fxx.u32 */
3718 case 0x11: /* vcvt.fxx.s32 */
3719 rm_is_dp = false;
3720 break;
3721 case 0x18: /* vcvtr.u32.fxx */
3722 case 0x19: /* vcvtz.u32.fxx */
3723 case 0x1a: /* vcvtr.s32.fxx */
3724 case 0x1b: /* vcvtz.s32.fxx */
3725 rd_is_dp = false;
3726 break;
3727
3728 case 0x14: /* vcvt fp <-> fixed */
3729 case 0x15:
3730 case 0x16:
3731 case 0x17:
3732 case 0x1c:
3733 case 0x1d:
3734 case 0x1e:
3735 case 0x1f:
3736 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3737 return 1;
3738 }
3739 /* Immediate frac_bits has same format as SREG_M. */
3740 rm_is_dp = false;
3741 break;
3742
6c1f6f27
RH
3743 case 0x13: /* vjcvt */
3744 if (!dp || !dc_isar_feature(aa32_jscvt, s)) {
3745 return 1;
3746 }
3747 rd_is_dp = false;
3748 break;
3749
e80941bd
RH
3750 default:
3751 return 1;
b7bcbe95 3752 }
e80941bd
RH
3753 } else if (dp) {
3754 /* rn is register number */
3755 VFP_DREG_N(rn, insn);
3756 }
3757
3758 if (rd_is_dp) {
3759 VFP_DREG_D(rd, insn);
3760 } else {
3761 rd = VFP_SREG_D(insn);
3762 }
3763 if (rm_is_dp) {
3764 VFP_DREG_M(rm, insn);
b7bcbe95 3765 } else {
9ee6e8bb 3766 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3767 }
3768
69d1fc22 3769 veclen = s->vec_len;
e80941bd 3770 if (op == 15 && rn > 3) {
b7bcbe95 3771 veclen = 0;
e80941bd 3772 }
b7bcbe95
FB
3773
3774 /* Shut up compiler warnings. */
3775 delta_m = 0;
3776 delta_d = 0;
3777 bank_mask = 0;
3b46e624 3778
b7bcbe95
FB
3779 if (veclen > 0) {
3780 if (dp)
3781 bank_mask = 0xc;
3782 else
3783 bank_mask = 0x18;
3784
3785 /* Figure out what type of vector operation this is. */
3786 if ((rd & bank_mask) == 0) {
3787 /* scalar */
3788 veclen = 0;
3789 } else {
3790 if (dp)
69d1fc22 3791 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3792 else
69d1fc22 3793 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3794
3795 if ((rm & bank_mask) == 0) {
3796 /* mixed scalar/vector */
3797 delta_m = 0;
3798 } else {
3799 /* vector */
3800 delta_m = delta_d;
3801 }
3802 }
3803 }
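            /*
             * Worked example (editorial sketch): a single-precision
             * short-vector op with s->vec_len == 3 and s->vec_stride == 0
             * performs four passes with delta_d == 1. An rd in s8..s15
             * has (rd & 0x18) != 0, so it advances and wraps within its
             * 8-register bank, while an rm in s0..s7 keeps delta_m == 0,
             * the mixed scalar/vector case.
             */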
3804
3805 /* Load the initial operands. */
3806 if (op == 15) {
3807 switch (rn) {
e80941bd 3808 case 0x08: case 0x09: /* Compare */
b7bcbe95
FB
3809 gen_mov_F0_vreg(dp, rd);
3810 gen_mov_F1_vreg(dp, rm);
3811 break;
e80941bd 3812 case 0x0a: case 0x0b: /* Compare with zero */
b7bcbe95
FB
3813 gen_mov_F0_vreg(dp, rd);
3814 gen_vfp_F1_ld0(dp);
3815 break;
e80941bd
RH
3816 case 0x14: /* vcvt fp <-> fixed */
3817 case 0x15:
3818 case 0x16:
3819 case 0x17:
3820 case 0x1c:
3821 case 0x1d:
3822 case 0x1e:
3823 case 0x1f:
9ee6e8bb
PB
3824 /* Source and destination are the same. */
3825 gen_mov_F0_vreg(dp, rd);
3826 break;
b7bcbe95
FB
3827 default:
3828 /* One source operand. */
e80941bd 3829 gen_mov_F0_vreg(rm_is_dp, rm);
9ee6e8bb 3830 break;
b7bcbe95
FB
3831 }
3832 } else {
3833 /* Two source operands. */
3834 gen_mov_F0_vreg(dp, rn);
3835 gen_mov_F1_vreg(dp, rm);
3836 }
3837
3838 for (;;) {
3839 /* Perform the calculation. */
3840 switch (op) {
605a6aed
PM
3841 case 0: /* VMLA: fd + (fn * fm) */
3842 /* Note that order of inputs to the add matters for NaNs */
3843 gen_vfp_F1_mul(dp);
3844 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3845 gen_vfp_add(dp);
3846 break;
605a6aed 3847 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3848 gen_vfp_mul(dp);
605a6aed
PM
3849 gen_vfp_F1_neg(dp);
3850 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3851 gen_vfp_add(dp);
3852 break;
605a6aed
PM
3853 case 2: /* VNMLS: -fd + (fn * fm) */
3854 /* Note that it isn't valid to replace (-A + B) with (B - A)
3855 * or similar plausible looking simplifications
3856 * because this will give wrong results for NaNs.
3857 */
3858 gen_vfp_F1_mul(dp);
3859 gen_mov_F0_vreg(dp, rd);
3860 gen_vfp_neg(dp);
3861 gen_vfp_add(dp);
b7bcbe95 3862 break;
605a6aed 3863 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3864 gen_vfp_mul(dp);
605a6aed
PM
3865 gen_vfp_F1_neg(dp);
3866 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3867 gen_vfp_neg(dp);
605a6aed 3868 gen_vfp_add(dp);
b7bcbe95
FB
3869 break;
3870 case 4: /* mul: fn * fm */
3871 gen_vfp_mul(dp);
3872 break;
3873 case 5: /* nmul: -(fn * fm) */
3874 gen_vfp_mul(dp);
3875 gen_vfp_neg(dp);
3876 break;
3877 case 6: /* add: fn + fm */
3878 gen_vfp_add(dp);
3879 break;
3880 case 7: /* sub: fn - fm */
3881 gen_vfp_sub(dp);
3882 break;
3883 case 8: /* div: fn / fm */
3884 gen_vfp_div(dp);
3885 break;
da97f52c
PM
3886 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3887 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3888 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3889 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3890 /* These are fused multiply-add, and must be done as one
3891 * floating point operation with no rounding between the
3892 * multiplication and addition steps.
3893 * NB that doing the negations here as separate steps is
3894 * correct: an input NaN should come out with its sign bit
3895 * flipped if it is a negated-input.
3896 */
d614a513 3897 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3898 return 1;
3899 }
3900 if (dp) {
3901 TCGv_ptr fpst;
3902 TCGv_i64 frd;
3903 if (op & 1) {
3904 /* VFNMS, VFMS */
3905 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3906 }
3907 frd = tcg_temp_new_i64();
3908 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3909 if (op & 2) {
3910 /* VFNMA, VFNMS */
3911 gen_helper_vfp_negd(frd, frd);
3912 }
3913 fpst = get_fpstatus_ptr(0);
3914 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3915 cpu_F1d, frd, fpst);
3916 tcg_temp_free_ptr(fpst);
3917 tcg_temp_free_i64(frd);
3918 } else {
3919 TCGv_ptr fpst;
3920 TCGv_i32 frd;
3921 if (op & 1) {
3922 /* VFNMS, VFMS */
3923 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3924 }
3925 frd = tcg_temp_new_i32();
3926 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3927 if (op & 2) {
3928 gen_helper_vfp_negs(frd, frd);
3929 }
3930 fpst = get_fpstatus_ptr(0);
3931 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3932 cpu_F1s, frd, fpst);
3933 tcg_temp_free_ptr(fpst);
3934 tcg_temp_free_i32(frd);
3935 }
3936 break;
9ee6e8bb 3937 case 14: /* fconst */
d614a513
PM
3938 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3939 return 1;
3940 }
9ee6e8bb
PB
3941
3942 n = (insn << 12) & 0x80000000;
3943 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3944 if (dp) {
3945 if (i & 0x40)
3946 i |= 0x3f80;
3947 else
3948 i |= 0x4000;
3949 n |= i << 16;
4373f3ce 3950 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3951 } else {
3952 if (i & 0x40)
3953 i |= 0x780;
3954 else
3955 i |= 0x800;
3956 n |= i << 19;
5b340b51 3957 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3958 }
9ee6e8bb 3959 break;
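                /*
                 * Worked example (editorial sketch of the arithmetic above,
                 * not translator code): the immediate a:b:c:d:e:f:g:h
                 * expands for single precision to
                 *     a : NOT(b) : b b b b b : c d : e f g h : 19 zeros
                 * so imm8 == 0x70 (a=0, b=c=d=1, efgh=0) yields 0x3f800000,
                 * i.e. 1.0f, matching the i |= 0x780 / i |= 0x800 cases.
                 */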
b7bcbe95
FB
3960 case 15: /* extension space */
3961 switch (rn) {
3962 case 0: /* cpy */
3963 /* no-op */
3964 break;
3965 case 1: /* abs */
3966 gen_vfp_abs(dp);
3967 break;
3968 case 2: /* neg */
3969 gen_vfp_neg(dp);
3970 break;
3971 case 3: /* sqrt */
3972 gen_vfp_sqrt(dp);
3973 break;
239c20c7 3974 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
486624fc
AB
3975 {
3976 TCGv_ptr fpst = get_fpstatus_ptr(false);
3977 TCGv_i32 ahp_mode = get_ahp_flag();
60011498
PB
3978 tmp = gen_vfp_mrs();
3979 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3980 if (dp) {
3981 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 3982 fpst, ahp_mode);
239c20c7
WN
3983 } else {
3984 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 3985 fpst, ahp_mode);
239c20c7 3986 }
486624fc
AB
3987 tcg_temp_free_i32(ahp_mode);
3988 tcg_temp_free_ptr(fpst);
7d1b0095 3989 tcg_temp_free_i32(tmp);
60011498 3990 break;
486624fc 3991 }
239c20c7 3992 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
486624fc
AB
3993 {
3994 TCGv_ptr fpst = get_fpstatus_ptr(false);
3995 TCGv_i32 ahp = get_ahp_flag();
60011498
PB
3996 tmp = gen_vfp_mrs();
3997 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3998 if (dp) {
3999 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 4000 fpst, ahp);
239c20c7
WN
4001 } else {
4002 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 4003 fpst, ahp);
239c20c7 4004 }
7d1b0095 4005 tcg_temp_free_i32(tmp);
486624fc
AB
4006 tcg_temp_free_i32(ahp);
4007 tcg_temp_free_ptr(fpst);
60011498 4008 break;
486624fc 4009 }
239c20c7 4010 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
486624fc
AB
4011 {
4012 TCGv_ptr fpst = get_fpstatus_ptr(false);
4013 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 4014 tmp = tcg_temp_new_i32();
486624fc 4015
239c20c7
WN
4016 if (dp) {
4017 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 4018 fpst, ahp);
239c20c7
WN
4019 } else {
4020 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 4021 fpst, ahp);
239c20c7 4022 }
486624fc
AB
4023 tcg_temp_free_i32(ahp);
4024 tcg_temp_free_ptr(fpst);
60011498
PB
4025 gen_mov_F0_vreg(0, rd);
4026 tmp2 = gen_vfp_mrs();
4027 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
4028 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4029 tcg_temp_free_i32(tmp2);
60011498
PB
4030 gen_vfp_msr(tmp);
4031 break;
486624fc 4032 }
239c20c7 4033 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
486624fc
AB
4034 {
4035 TCGv_ptr fpst = get_fpstatus_ptr(false);
4036 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 4037 tmp = tcg_temp_new_i32();
239c20c7
WN
4038 if (dp) {
4039 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 4040 fpst, ahp);
239c20c7
WN
4041 } else {
4042 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 4043 fpst, ahp);
239c20c7 4044 }
486624fc
AB
4045 tcg_temp_free_i32(ahp);
4046 tcg_temp_free_ptr(fpst);
60011498
PB
4047 tcg_gen_shli_i32(tmp, tmp, 16);
4048 gen_mov_F0_vreg(0, rd);
4049 tmp2 = gen_vfp_mrs();
4050 tcg_gen_ext16u_i32(tmp2, tmp2);
4051 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4052 tcg_temp_free_i32(tmp2);
60011498
PB
4053 gen_vfp_msr(tmp);
4054 break;
486624fc 4055 }
b7bcbe95
FB
4056 case 8: /* cmp */
4057 gen_vfp_cmp(dp);
4058 break;
4059 case 9: /* cmpe */
4060 gen_vfp_cmpe(dp);
4061 break;
4062 case 10: /* cmpz */
4063 gen_vfp_cmp(dp);
4064 break;
4065 case 11: /* cmpez */
4066 gen_vfp_F1_ld0(dp);
4067 gen_vfp_cmpe(dp);
4068 break;
664c6733
WN
4069 case 12: /* vrintr */
4070 {
4071 TCGv_ptr fpst = get_fpstatus_ptr(0);
4072 if (dp) {
4073 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
4074 } else {
4075 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
4076 }
4077 tcg_temp_free_ptr(fpst);
4078 break;
4079 }
a290c62a
WN
4080 case 13: /* vrintz */
4081 {
4082 TCGv_ptr fpst = get_fpstatus_ptr(0);
4083 TCGv_i32 tcg_rmode;
4084 tcg_rmode = tcg_const_i32(float_round_to_zero);
9b049916 4085 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
4086 if (dp) {
4087 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
4088 } else {
4089 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
4090 }
9b049916 4091 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
4092 tcg_temp_free_i32(tcg_rmode);
4093 tcg_temp_free_ptr(fpst);
4094 break;
4095 }
4e82bc01
WN
4096 case 14: /* vrintx */
4097 {
4098 TCGv_ptr fpst = get_fpstatus_ptr(0);
4099 if (dp) {
4100 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
4101 } else {
4102 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
4103 }
4104 tcg_temp_free_ptr(fpst);
4105 break;
4106 }
b7bcbe95 4107 case 15: /* single<->double conversion */
e80941bd 4108 if (dp) {
4373f3ce 4109 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
e80941bd 4110 } else {
4373f3ce 4111 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
e80941bd 4112 }
b7bcbe95
FB
4113 break;
4114 case 16: /* fuito */
5500b06c 4115 gen_vfp_uito(dp, 0);
b7bcbe95
FB
4116 break;
4117 case 17: /* fsito */
5500b06c 4118 gen_vfp_sito(dp, 0);
b7bcbe95 4119 break;
6c1f6f27
RH
4120 case 19: /* vjcvt */
4121 gen_helper_vjcvt(cpu_F0s, cpu_F0d, cpu_env);
4122 break;
9ee6e8bb 4123 case 20: /* fshto */
5500b06c 4124 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
4125 break;
4126 case 21: /* fslto */
5500b06c 4127 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
4128 break;
4129 case 22: /* fuhto */
5500b06c 4130 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
4131 break;
4132 case 23: /* fulto */
5500b06c 4133 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 4134 break;
b7bcbe95 4135 case 24: /* ftoui */
5500b06c 4136 gen_vfp_toui(dp, 0);
b7bcbe95
FB
4137 break;
4138 case 25: /* ftouiz */
5500b06c 4139 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
4140 break;
4141 case 26: /* ftosi */
5500b06c 4142 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
4143 break;
4144 case 27: /* ftosiz */
5500b06c 4145 gen_vfp_tosiz(dp, 0);
b7bcbe95 4146 break;
9ee6e8bb 4147 case 28: /* ftosh */
5500b06c 4148 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
4149 break;
4150 case 29: /* ftosl */
5500b06c 4151 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
4152 break;
4153 case 30: /* ftouh */
5500b06c 4154 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
4155 break;
4156 case 31: /* ftoul */
5500b06c 4157 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 4158 break;
b7bcbe95 4159 default: /* undefined */
e80941bd 4160 g_assert_not_reached();
b7bcbe95
FB
4161 }
4162 break;
4163 default: /* undefined */
b7bcbe95
FB
4164 return 1;
4165 }
4166
e80941bd
RH
4167 /* Write back the result, if any. */
4168 if (!no_output) {
4169 gen_mov_vreg_F0(rd_is_dp, rd);
239c20c7 4170 }
b7bcbe95
FB
4171
4172 /* break out of the loop if we have finished */
e80941bd 4173 if (veclen == 0) {
b7bcbe95 4174 break;
e80941bd 4175 }
b7bcbe95
FB
4176
4177 if (op == 15 && delta_m == 0) {
4178 /* single source one-many */
4179 while (veclen--) {
4180 rd = ((rd + delta_d) & (bank_mask - 1))
4181 | (rd & bank_mask);
4182 gen_mov_vreg_F0(dp, rd);
4183 }
4184 break;
4185 }
4186 /* Setup the next operands. */
4187 veclen--;
4188 rd = ((rd + delta_d) & (bank_mask - 1))
4189 | (rd & bank_mask);
4190
4191 if (op == 15) {
4192 /* One source operand. */
4193 rm = ((rm + delta_m) & (bank_mask - 1))
4194 | (rm & bank_mask);
4195 gen_mov_F0_vreg(dp, rm);
4196 } else {
4197 /* Two source operands. */
4198 rn = ((rn + delta_d) & (bank_mask - 1))
4199 | (rn & bank_mask);
4200 gen_mov_F0_vreg(dp, rn);
4201 if (delta_m) {
4202 rm = ((rm + delta_m) & (bank_mask - 1))
4203 | (rm & bank_mask);
4204 gen_mov_F1_vreg(dp, rm);
4205 }
4206 }
4207 }
4208 }
4209 break;
4210 case 0xc:
4211 case 0xd:
8387da81 4212 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
4213 /* two-register transfer */
4214 rn = (insn >> 16) & 0xf;
4215 rd = (insn >> 12) & 0xf;
4216 if (dp) {
9ee6e8bb
PB
4217 VFP_DREG_M(rm, insn);
4218 } else {
4219 rm = VFP_SREG_M(insn);
4220 }
b7bcbe95 4221
18c9b560 4222 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4223 /* vfp->arm */
4224 if (dp) {
4373f3ce
PB
4225 gen_mov_F0_vreg(0, rm * 2);
4226 tmp = gen_vfp_mrs();
4227 store_reg(s, rd, tmp);
4228 gen_mov_F0_vreg(0, rm * 2 + 1);
4229 tmp = gen_vfp_mrs();
4230 store_reg(s, rn, tmp);
b7bcbe95
FB
4231 } else {
4232 gen_mov_F0_vreg(0, rm);
4373f3ce 4233 tmp = gen_vfp_mrs();
8387da81 4234 store_reg(s, rd, tmp);
b7bcbe95 4235 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4236 tmp = gen_vfp_mrs();
8387da81 4237 store_reg(s, rn, tmp);
b7bcbe95
FB
4238 }
4239 } else {
4240 /* arm->vfp */
4241 if (dp) {
4373f3ce
PB
4242 tmp = load_reg(s, rd);
4243 gen_vfp_msr(tmp);
4244 gen_mov_vreg_F0(0, rm * 2);
4245 tmp = load_reg(s, rn);
4246 gen_vfp_msr(tmp);
4247 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4248 } else {
8387da81 4249 tmp = load_reg(s, rd);
4373f3ce 4250 gen_vfp_msr(tmp);
b7bcbe95 4251 gen_mov_vreg_F0(0, rm);
8387da81 4252 tmp = load_reg(s, rn);
4373f3ce 4253 gen_vfp_msr(tmp);
b7bcbe95
FB
4254 gen_mov_vreg_F0(0, rm + 1);
4255 }
4256 }
4257 } else {
4258 /* Load/store */
4259 rn = (insn >> 16) & 0xf;
4260 if (dp)
9ee6e8bb 4261 VFP_DREG_D(rd, insn);
b7bcbe95 4262 else
9ee6e8bb 4263 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4264 if ((insn & 0x01200000) == 0x01000000) {
4265 /* Single load/store */
4266 offset = (insn & 0xff) << 2;
4267 if ((insn & (1 << 23)) == 0)
4268 offset = -offset;
934814f1
PM
4269 if (s->thumb && rn == 15) {
4270 /* This is actually UNPREDICTABLE */
4271 addr = tcg_temp_new_i32();
4272 tcg_gen_movi_i32(addr, s->pc & ~2);
4273 } else {
4274 addr = load_reg(s, rn);
4275 }
312eea9f 4276 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4277 if (insn & (1 << 20)) {
312eea9f 4278 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4279 gen_mov_vreg_F0(dp, rd);
4280 } else {
4281 gen_mov_F0_vreg(dp, rd);
312eea9f 4282 gen_vfp_st(s, dp, addr);
b7bcbe95 4283 }
7d1b0095 4284 tcg_temp_free_i32(addr);
b7bcbe95
FB
4285 } else {
4286 /* load/store multiple */
934814f1 4287 int w = insn & (1 << 21);
b7bcbe95
FB
4288 if (dp)
4289 n = (insn >> 1) & 0x7f;
4290 else
4291 n = insn & 0xff;
4292
934814f1
PM
4293 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4294 /* P == U , W == 1 => UNDEF */
4295 return 1;
4296 }
4297 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4298 /* UNPREDICTABLE cases for bad immediates: we choose to
4299 * UNDEF to avoid generating huge numbers of TCG ops
4300 */
4301 return 1;
4302 }
4303 if (rn == 15 && w) {
4304 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4305 return 1;
4306 }
4307
4308 if (s->thumb && rn == 15) {
4309 /* This is actually UNPREDICTABLE */
4310 addr = tcg_temp_new_i32();
4311 tcg_gen_movi_i32(addr, s->pc & ~2);
4312 } else {
4313 addr = load_reg(s, rn);
4314 }
b7bcbe95 4315 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4316 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95 4317
8a954faf
PM
4318 if (s->v8m_stackcheck && rn == 13 && w) {
4319 /*
4320 * Here 'addr' is the lowest address we will store to,
4321 * and is either the old SP (if post-increment) or
4322 * the new SP (if pre-decrement). For post-increment
4323 * where the old value is below the limit and the new
4324 * value is above, it is UNKNOWN whether the limit check
4325 * triggers; we choose to trigger.
4326 */
4327 gen_helper_v8m_stackcheck(cpu_env, addr);
4328 }
4329
b7bcbe95
FB
4330 if (dp)
4331 offset = 8;
4332 else
4333 offset = 4;
4334 for (i = 0; i < n; i++) {
18c9b560 4335 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4336 /* load */
312eea9f 4337 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4338 gen_mov_vreg_F0(dp, rd + i);
4339 } else {
4340 /* store */
4341 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4342 gen_vfp_st(s, dp, addr);
b7bcbe95 4343 }
312eea9f 4344 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4345 }
934814f1 4346 if (w) {
b7bcbe95
FB
4347 /* writeback */
4348 if (insn & (1 << 24))
4349 offset = -offset * n;
4350 else if (dp && (insn & 1))
4351 offset = 4;
4352 else
4353 offset = 0;
4354
4355 if (offset != 0)
312eea9f
FN
4356 tcg_gen_addi_i32(addr, addr, offset);
4357 store_reg(s, rn, addr);
4358 } else {
7d1b0095 4359 tcg_temp_free_i32(addr);
b7bcbe95
FB
4360 }
4361 }
4362 }
4363 break;
4364 default:
4365 /* Should never happen. */
4366 return 1;
4367 }
4368 return 0;
4369}
4370
90aa39a1 4371static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4372{
90aa39a1 4373#ifndef CONFIG_USER_ONLY
dcba3a8d 4374 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
90aa39a1
SF
4375 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4376#else
4377 return true;
4378#endif
4379}
6e256c93 4380
8a6b28c7
EC
4381static void gen_goto_ptr(void)
4382{
7f11636d 4383 tcg_gen_lookup_and_goto_ptr();
8a6b28c7
EC
4384}
4385
4cae8f56
AB
4386/* This will end the TB but doesn't guarantee we'll return to
4387 * cpu_loop_exec. Any live exit_requests will be processed as we
4388 * enter the next TB.
4389 */
8a6b28c7 4390static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
90aa39a1
SF
4391{
4392 if (use_goto_tb(s, dest)) {
57fec1fe 4393 tcg_gen_goto_tb(n);
eaed129d 4394 gen_set_pc_im(s, dest);
07ea28b4 4395 tcg_gen_exit_tb(s->base.tb, n);
6e256c93 4396 } else {
eaed129d 4397 gen_set_pc_im(s, dest);
8a6b28c7 4398 gen_goto_ptr();
6e256c93 4399 }
dcba3a8d 4400 s->base.is_jmp = DISAS_NORETURN;
c53be334
FB
4401}
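/*
 * Usage sketch (illustrative only; 'flag', 'dest' and the label are
 * hypothetical): a conditional branch ends the TB with two chained
 * exits, using distinct slot numbers so each exit can be patched
 * independently.
 *
 *     TCGLabel *taken = gen_new_label();
 *     tcg_gen_brcondi_i32(TCG_COND_NE, flag, 0, taken);
 *     gen_goto_tb(s, 0, s->pc);       // exit 0: branch not taken
 *     gen_set_label(taken);
 *     gen_goto_tb(s, 1, dest);        // exit 1: branch taken
 */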
4402
8aaca4c0
FB
4403static inline void gen_jmp (DisasContext *s, uint32_t dest)
4404{
b636649f 4405 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4406 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4407 if (s->thumb)
d9ba4830
PB
4408 dest |= 1;
4409 gen_bx_im(s, dest);
8aaca4c0 4410 } else {
6e256c93 4411 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4412 }
4413}
4414
39d5492a 4415static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4416{
ee097184 4417 if (x)
d9ba4830 4418 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4419 else
d9ba4830 4420 gen_sxth(t0);
ee097184 4421 if (y)
d9ba4830 4422 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4423 else
d9ba4830
PB
4424 gen_sxth(t1);
4425 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4426}
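/*
 * Standalone reference sketch (hypothetical helper, not used by the
 * translator): the same halfword selection on host values, as in
 * SMULBB/SMULBT/SMULTB/SMULTT.
 */
static inline int32_t mulxy_ref(int32_t a, int32_t b, int x, int y)
{
    int16_t ha = x ? (int16_t)((uint32_t)a >> 16) : (int16_t)a;
    int16_t hb = y ? (int16_t)((uint32_t)b >> 16) : (int16_t)b;

    return (int32_t)ha * hb;  /* 16x16 -> 32-bit product */
}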
4427
4428/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4429static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4430{
b5ff1b31
FB
4431 uint32_t mask;
4432
4433 mask = 0;
4434 if (flags & (1 << 0))
4435 mask |= 0xff;
4436 if (flags & (1 << 1))
4437 mask |= 0xff00;
4438 if (flags & (1 << 2))
4439 mask |= 0xff0000;
4440 if (flags & (1 << 3))
4441 mask |= 0xff000000;
9ee6e8bb 4442
2ae23e75 4443 /* Mask out undefined bits. */
9ee6e8bb 4444 mask &= ~CPSR_RESERVED;
d614a513 4445 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4446 mask &= ~CPSR_T;
d614a513
PM
4447 }
4448 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4449 mask &= ~CPSR_Q; /* V5TE in reality */
d614a513
PM
4450 }
4451 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4452 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4453 }
4454 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4455 mask &= ~CPSR_IT;
d614a513 4456 }
4051e12c
PM
4457 /* Mask out execution state and reserved bits. */
4458 if (!spsr) {
4459 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4460 }
b5ff1b31
FB
4461 /* Mask out privileged bits. */
4462 if (IS_USER(s))
9ee6e8bb 4463 mask &= CPSR_USER;
b5ff1b31
FB
4464 return mask;
4465}
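/*
 * Standalone sketch (hypothetical helper, not used here) of the
 * field-to-byte-lane step above, before any feature or privilege
 * masking: each of the four flag bits selects one byte of the PSR.
 */
static inline uint32_t psr_field_mask(int flags)
{
    uint32_t mask = 0;
    int i;

    for (i = 0; i < 4; i++) {
        if (flags & (1 << i)) {
            mask |= 0xffu << (8 * i);
        }
    }
    return mask; /* e.g. flags == 0x9 -> 0xff0000ff */
}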
4466
2fbac54b 4467/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 4468static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 4469{
39d5492a 4470 TCGv_i32 tmp;
b5ff1b31
FB
4471 if (spsr) {
4472 /* ??? This is also undefined in system mode. */
4473 if (IS_USER(s))
4474 return 1;
d9ba4830
PB
4475
4476 tmp = load_cpu_field(spsr);
4477 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
4478 tcg_gen_andi_i32(t0, t0, mask);
4479 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 4480 store_cpu_field(tmp, spsr);
b5ff1b31 4481 } else {
2fbac54b 4482 gen_set_cpsr(t0, mask);
b5ff1b31 4483 }
7d1b0095 4484 tcg_temp_free_i32(t0);
b5ff1b31
FB
4485 gen_lookup_tb(s);
4486 return 0;
4487}
4488
2fbac54b
FN
4489/* Returns nonzero if access to the PSR is not permitted. */
4490static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4491{
39d5492a 4492 TCGv_i32 tmp;
7d1b0095 4493 tmp = tcg_temp_new_i32();
2fbac54b
FN
4494 tcg_gen_movi_i32(tmp, val);
4495 return gen_set_psr(s, mask, spsr, tmp);
4496}
4497
8bfd0550
PM
4498static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4499 int *tgtmode, int *regno)
4500{
4501 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4502 * the target mode and register number, and identify the various
4503 * unpredictable cases.
4504 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4505 * + executed in user mode
4506 * + using R15 as the src/dest register
4507 * + accessing an unimplemented register
4508 * + accessing a register that's inaccessible at current PL/security state*
4509 * + accessing a register that you could access with a different insn
4510 * We choose to UNDEF in all these cases.
4511 * Since we don't know which of the various AArch32 modes we are in
4512 * we have to defer some checks to runtime.
4513 * Accesses to Monitor mode registers from Secure EL1 (which implies
4514 * that EL3 is AArch64) must trap to EL3.
4515 *
4516 * If the access checks fail this function will emit code to take
4517 * an exception and return false. Otherwise it will return true,
4518 * and set *tgtmode and *regno appropriately.
4519 */
4520 int exc_target = default_exception_el(s);
4521
4522 /* These instructions are present only in ARMv8, or in ARMv7 with the
4523 * Virtualization Extensions.
4524 */
4525 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4526 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4527 goto undef;
4528 }
4529
4530 if (IS_USER(s) || rn == 15) {
4531 goto undef;
4532 }
4533
4534 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4535 * of registers into (r, sysm).
4536 */
4537 if (r) {
4538 /* SPSRs for other modes */
4539 switch (sysm) {
4540 case 0xe: /* SPSR_fiq */
4541 *tgtmode = ARM_CPU_MODE_FIQ;
4542 break;
4543 case 0x10: /* SPSR_irq */
4544 *tgtmode = ARM_CPU_MODE_IRQ;
4545 break;
4546 case 0x12: /* SPSR_svc */
4547 *tgtmode = ARM_CPU_MODE_SVC;
4548 break;
4549 case 0x14: /* SPSR_abt */
4550 *tgtmode = ARM_CPU_MODE_ABT;
4551 break;
4552 case 0x16: /* SPSR_und */
4553 *tgtmode = ARM_CPU_MODE_UND;
4554 break;
4555 case 0x1c: /* SPSR_mon */
4556 *tgtmode = ARM_CPU_MODE_MON;
4557 break;
4558 case 0x1e: /* SPSR_hyp */
4559 *tgtmode = ARM_CPU_MODE_HYP;
4560 break;
4561 default: /* unallocated */
4562 goto undef;
4563 }
4564 /* We arbitrarily assign SPSR a register number of 16. */
4565 *regno = 16;
4566 } else {
4567 /* general purpose registers for other modes */
4568 switch (sysm) {
4569 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4570 *tgtmode = ARM_CPU_MODE_USR;
4571 *regno = sysm + 8;
4572 break;
4573 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4574 *tgtmode = ARM_CPU_MODE_FIQ;
4575 *regno = sysm;
4576 break;
4577 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4578 *tgtmode = ARM_CPU_MODE_IRQ;
4579 *regno = sysm & 1 ? 13 : 14;
4580 break;
4581 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4582 *tgtmode = ARM_CPU_MODE_SVC;
4583 *regno = sysm & 1 ? 13 : 14;
4584 break;
4585 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4586 *tgtmode = ARM_CPU_MODE_ABT;
4587 *regno = sysm & 1 ? 13 : 14;
4588 break;
4589 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4590 *tgtmode = ARM_CPU_MODE_UND;
4591 *regno = sysm & 1 ? 13 : 14;
4592 break;
4593 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4594 *tgtmode = ARM_CPU_MODE_MON;
4595 *regno = sysm & 1 ? 13 : 14;
4596 break;
4597 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4598 *tgtmode = ARM_CPU_MODE_HYP;
4599 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4600 *regno = sysm & 1 ? 13 : 17;
4601 break;
4602 default: /* unallocated */
4603 goto undef;
4604 }
4605 }
4606
4607 /* Catch the 'accessing inaccessible register' cases we can detect
4608 * at translate time.
4609 */
4610 switch (*tgtmode) {
4611 case ARM_CPU_MODE_MON:
4612 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4613 goto undef;
4614 }
4615 if (s->current_el == 1) {
4616 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4617 * then accesses to Mon registers trap to EL3
4618 */
4619 exc_target = 3;
4620 goto undef;
4621 }
4622 break;
4623 case ARM_CPU_MODE_HYP:
aec4dd09
PM
4624 /*
4625 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
4626 * (and so we can forbid accesses from EL2 or below). elr_hyp
4627 * can be accessed also from Hyp mode, so forbid accesses from
4628 * EL0 or EL1.
8bfd0550 4629 */
aec4dd09
PM
4630 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
4631 (s->current_el < 3 && *regno != 17)) {
8bfd0550
PM
4632 goto undef;
4633 }
4634 break;
4635 default:
4636 break;
4637 }
4638
4639 return true;
4640
4641undef:
4642 /* If we get here then some access check did not pass */
4643 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4644 return false;
4645}
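/*
 * Decode examples from the table above (editorial note): (r=0,
 * sysm=0x8) selects r8_fiq; (r=1, sysm=0xe) selects SPSR_fiq,
 * reported with the arbitrary register number 16; (r=0, sysm=0x1e)
 * selects ELR_Hyp, reported as register 17.
 */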
4646
4647static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4648{
4649 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4650 int tgtmode = 0, regno = 0;
4651
4652 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4653 return;
4654 }
4655
4656 /* Sync state because msr_banked() can raise exceptions */
4657 gen_set_condexec(s);
4658 gen_set_pc_im(s, s->pc - 4);
4659 tcg_reg = load_reg(s, rn);
4660 tcg_tgtmode = tcg_const_i32(tgtmode);
4661 tcg_regno = tcg_const_i32(regno);
4662 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4663 tcg_temp_free_i32(tcg_tgtmode);
4664 tcg_temp_free_i32(tcg_regno);
4665 tcg_temp_free_i32(tcg_reg);
dcba3a8d 4666 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4667}
4668
4669static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4670{
4671 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4672 int tgtmode = 0, regno = 0;
4673
4674 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4675 return;
4676 }
4677
4678 /* Sync state because mrs_banked() can raise exceptions */
4679 gen_set_condexec(s);
4680 gen_set_pc_im(s, s->pc - 4);
4681 tcg_reg = tcg_temp_new_i32();
4682 tcg_tgtmode = tcg_const_i32(tgtmode);
4683 tcg_regno = tcg_const_i32(regno);
4684 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4685 tcg_temp_free_i32(tcg_tgtmode);
4686 tcg_temp_free_i32(tcg_regno);
4687 store_reg(s, rn, tcg_reg);
dcba3a8d 4688 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4689}
4690
fb0e8e79
PM
4691/* Store value to PC as for an exception return (ie don't
4692 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4693 * will do the masking based on the new value of the Thumb bit.
4694 */
4695static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
b5ff1b31 4696{
fb0e8e79
PM
4697 tcg_gen_mov_i32(cpu_R[15], pc);
4698 tcg_temp_free_i32(pc);
b5ff1b31
FB
4699}
4700
b0109805 4701/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 4702static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 4703{
fb0e8e79
PM
4704 store_pc_exc_ret(s, pc);
4705 /* The cpsr_write_eret helper will mask the low bits of PC
4706 * appropriately depending on the new Thumb bit, so it must
4707 * be called after storing the new PC.
4708 */
e69ad9df
AL
4709 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4710 gen_io_start();
4711 }
235ea1f5 4712 gen_helper_cpsr_write_eret(cpu_env, cpsr);
e69ad9df
AL
4713 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4714 gen_io_end();
4715 }
7d1b0095 4716 tcg_temp_free_i32(cpsr);
b29fd33d 4717 /* Must exit loop to check un-masked IRQs */
dcba3a8d 4718 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb 4719}
3b46e624 4720
fb0e8e79
PM
4721/* Generate an old-style exception return. Marks pc as dead. */
4722static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4723{
4724 gen_rfe(s, pc, load_cpu_field(spsr));
4725}
4726
c22edfeb
AB
4727/*
4728 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4729 * only call the helper when running single-threaded TCG code to ensure
4730 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4731 * just skip this instruction. Currently the SEV/SEVL instructions,
4732 * which are *one* of many ways to wake the CPU from WFE, are not
4733 * implemented, so we can't sleep like WFI does.
4734 */
9ee6e8bb
PB
4735static void gen_nop_hint(DisasContext *s, int val)
4736{
4737 switch (val) {
2399d4e7
EC
4738 /* When running in MTTCG we don't generate jumps to the yield and
4739 * WFE helpers as it won't affect the scheduling of other vCPUs.
4740 * If we wanted to more completely model WFE/SEV so we don't busy
4741 * spin unnecessarily we would need to do something more involved.
4742 */
c87e5a61 4743 case 1: /* yield */
2399d4e7 4744 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4745 gen_set_pc_im(s, s->pc);
dcba3a8d 4746 s->base.is_jmp = DISAS_YIELD;
c22edfeb 4747 }
c87e5a61 4748 break;
9ee6e8bb 4749 case 3: /* wfi */
eaed129d 4750 gen_set_pc_im(s, s->pc);
dcba3a8d 4751 s->base.is_jmp = DISAS_WFI;
9ee6e8bb
PB
4752 break;
4753 case 2: /* wfe */
2399d4e7 4754 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4755 gen_set_pc_im(s, s->pc);
dcba3a8d 4756 s->base.is_jmp = DISAS_WFE;
c22edfeb 4757 }
72c1d3af 4758 break;
9ee6e8bb 4759 case 4: /* sev */
12b10571
MR
4760 case 5: /* sevl */
4761 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4762 default: /* nop */
4763 break;
4764 }
4765}
99c475ab 4766
ad69471c 4767#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4768
39d5492a 4769static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4770{
4771 switch (size) {
dd8fbd78
FN
4772 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4773 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4774 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4775 default: abort();
9ee6e8bb 4776 }
9ee6e8bb
PB
4777}
4778
39d5492a 4779static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4780{
4781 switch (size) {
dd8fbd78
FN
4782 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4783 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4784 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4785 default: return;
4786 }
4787}
4788
4789/* 32-bit pairwise ops end up the same as the elementwise versions. */
9ecd3c5c
RH
4790#define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
4791#define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
4792#define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
4793#define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
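/*
 * Rationale (editorial note): with 32-bit elements each input D
 * register holds exactly one pair, so once the decoder has routed
 * the right 32-bit elements into tmp/tmp2 a "pairwise" max/min is
 * just an elementwise max/min; hence the aliases onto the generic
 * smax/umax/smin/umin ops.
 */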
ad69471c 4794
ad69471c
PB
4795#define GEN_NEON_INTEGER_OP_ENV(name) do { \
4796 switch ((size << 1) | u) { \
4797 case 0: \
dd8fbd78 4798 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4799 break; \
4800 case 1: \
dd8fbd78 4801 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4802 break; \
4803 case 2: \
dd8fbd78 4804 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4805 break; \
4806 case 3: \
dd8fbd78 4807 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4808 break; \
4809 case 4: \
dd8fbd78 4810 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4811 break; \
4812 case 5: \
dd8fbd78 4813 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4814 break; \
4815 default: return 1; \
4816 }} while (0)
9ee6e8bb
PB
4817
4818#define GEN_NEON_INTEGER_OP(name) do { \
4819 switch ((size << 1) | u) { \
ad69471c 4820 case 0: \
dd8fbd78 4821 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
4822 break; \
4823 case 1: \
dd8fbd78 4824 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
4825 break; \
4826 case 2: \
dd8fbd78 4827 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
4828 break; \
4829 case 3: \
dd8fbd78 4830 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
4831 break; \
4832 case 4: \
dd8fbd78 4833 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
4834 break; \
4835 case 5: \
dd8fbd78 4836 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 4837 break; \
9ee6e8bb
PB
4838 default: return 1; \
4839 }} while (0)
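/*
 * Expansion sketch (editorial note): GEN_NEON_INTEGER_OP(hadd) with
 * size == 2 and u == 1 selects case 5 above and emits
 *     gen_helper_neon_hadd_u32(tmp, tmp, tmp2);
 * i.e. (size << 1) | u indexes the s8/u8/s16/u16/s32/u32 variants.
 */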
4840
39d5492a 4841static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4842{
39d5492a 4843 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4844 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4845 return tmp;
9ee6e8bb
PB
4846}
4847
39d5492a 4848static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4849{
dd8fbd78 4850 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4851 tcg_temp_free_i32(var);
9ee6e8bb
PB
4852}
4853
39d5492a 4854static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4855{
39d5492a 4856 TCGv_i32 tmp;
9ee6e8bb 4857 if (size == 1) {
0fad6efc
PM
4858 tmp = neon_load_reg(reg & 7, reg >> 4);
4859 if (reg & 8) {
dd8fbd78 4860 gen_neon_dup_high16(tmp);
0fad6efc
PM
4861 } else {
4862 gen_neon_dup_low16(tmp);
dd8fbd78 4863 }
0fad6efc
PM
4864 } else {
4865 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4866 }
dd8fbd78 4867 return tmp;
9ee6e8bb
PB
4868}
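/*
 * Encoding sketch (editorial note): for a 32-bit scalar, 'reg' packs
 * the D register in bits [3:0] with the word index above it; for a
 * 16-bit scalar the D register is limited to bits [2:0], bit 3 picks
 * the halfword within the loaded word, and that halfword is then
 * duplicated into both halves of the result.
 */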
4869
02acedf9 4870static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4871{
b13708bb
RH
4872 TCGv_ptr pd, pm;
4873
600b828c 4874 if (!q && size == 2) {
02acedf9
PM
4875 return 1;
4876 }
b13708bb
RH
4877 pd = vfp_reg_ptr(true, rd);
4878 pm = vfp_reg_ptr(true, rm);
02acedf9
PM
4879 if (q) {
4880 switch (size) {
4881 case 0:
b13708bb 4882 gen_helper_neon_qunzip8(pd, pm);
02acedf9
PM
4883 break;
4884 case 1:
b13708bb 4885 gen_helper_neon_qunzip16(pd, pm);
02acedf9
PM
4886 break;
4887 case 2:
b13708bb 4888 gen_helper_neon_qunzip32(pd, pm);
02acedf9
PM
4889 break;
4890 default:
4891 abort();
4892 }
4893 } else {
4894 switch (size) {
4895 case 0:
b13708bb 4896 gen_helper_neon_unzip8(pd, pm);
02acedf9
PM
4897 break;
4898 case 1:
b13708bb 4899 gen_helper_neon_unzip16(pd, pm);
02acedf9
PM
4900 break;
4901 default:
4902 abort();
4903 }
4904 }
b13708bb
RH
4905 tcg_temp_free_ptr(pd);
4906 tcg_temp_free_ptr(pm);
02acedf9 4907 return 0;
19457615
FN
4908}
4909
d68a6f3a 4910static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4911{
b13708bb
RH
4912 TCGv_ptr pd, pm;
4913
600b828c 4914 if (!q && size == 2) {
d68a6f3a
PM
4915 return 1;
4916 }
b13708bb
RH
4917 pd = vfp_reg_ptr(true, rd);
4918 pm = vfp_reg_ptr(true, rm);
d68a6f3a
PM
4919 if (q) {
4920 switch (size) {
4921 case 0:
b13708bb 4922 gen_helper_neon_qzip8(pd, pm);
d68a6f3a
PM
4923 break;
4924 case 1:
b13708bb 4925 gen_helper_neon_qzip16(pd, pm);
d68a6f3a
PM
4926 break;
4927 case 2:
b13708bb 4928 gen_helper_neon_qzip32(pd, pm);
d68a6f3a
PM
4929 break;
4930 default:
4931 abort();
4932 }
4933 } else {
4934 switch (size) {
4935 case 0:
b13708bb 4936 gen_helper_neon_zip8(pd, pm);
d68a6f3a
PM
4937 break;
4938 case 1:
b13708bb 4939 gen_helper_neon_zip16(pd, pm);
d68a6f3a
PM
4940 break;
4941 default:
4942 abort();
4943 }
4944 }
b13708bb
RH
4945 tcg_temp_free_ptr(pd);
4946 tcg_temp_free_ptr(pm);
d68a6f3a 4947 return 0;
19457615
FN
4948}
4949
39d5492a 4950static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4951{
39d5492a 4952 TCGv_i32 rd, tmp;
19457615 4953
7d1b0095
PM
4954 rd = tcg_temp_new_i32();
4955 tmp = tcg_temp_new_i32();
19457615
FN
4956
4957 tcg_gen_shli_i32(rd, t0, 8);
4958 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4959 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4960 tcg_gen_or_i32(rd, rd, tmp);
4961
4962 tcg_gen_shri_i32(t1, t1, 8);
4963 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4964 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4965 tcg_gen_or_i32(t1, t1, tmp);
4966 tcg_gen_mov_i32(t0, rd);
4967
7d1b0095
PM
4968 tcg_temp_free_i32(tmp);
4969 tcg_temp_free_i32(rd);
19457615
FN
4970}
4971
39d5492a 4972static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4973{
39d5492a 4974 TCGv_i32 rd, tmp;
19457615 4975
7d1b0095
PM
4976 rd = tcg_temp_new_i32();
4977 tmp = tcg_temp_new_i32();
19457615
FN
4978
4979 tcg_gen_shli_i32(rd, t0, 16);
4980 tcg_gen_andi_i32(tmp, t1, 0xffff);
4981 tcg_gen_or_i32(rd, rd, tmp);
4982 tcg_gen_shri_i32(t1, t1, 16);
4983 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4984 tcg_gen_or_i32(t1, t1, tmp);
4985 tcg_gen_mov_i32(t0, rd);
4986
7d1b0095
PM
4987 tcg_temp_free_i32(tmp);
4988 tcg_temp_free_i32(rd);
19457615
FN
4989}
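/*
 * Worked example for gen_neon_trn_u8 above (editorial note, values
 * arbitrary): with t0 = 0x44332211 and t1 = 0x88776655 the helper
 * leaves t0 = 0x33771155 and t1 = 0x44882266, i.e. t0 gathers the
 * even byte lanes of both inputs and t1 the odd ones -- the per-pass
 * building block for the element transposes.
 */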
4990
4991
9ee6e8bb
PB
4992static struct {
4993 int nregs;
4994 int interleave;
4995 int spacing;
308e5636 4996} const neon_ls_element_type[11] = {
ac55d007
RH
4997 {1, 4, 1},
4998 {1, 4, 2},
9ee6e8bb 4999 {4, 1, 1},
ac55d007
RH
5000 {2, 2, 2},
5001 {1, 3, 1},
5002 {1, 3, 2},
9ee6e8bb
PB
5003 {3, 1, 1},
5004 {1, 1, 1},
ac55d007
RH
5005 {1, 2, 1},
5006 {1, 2, 2},
9ee6e8bb
PB
5007 {2, 1, 1}
5008};
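/*
 * Reading the table (editorial sketch of the v7 type-field encoding):
 * op == 2, {4, 1, 1}, is VLD1/VST1 with four consecutive D registers
 * and no interleaving; op == 0, {1, 4, 1}, is VLD4/VST4, one pass
 * distributing each structure across four adjacent registers. The
 * writeback stride is nregs * interleave * 8 bytes, e.g. 32 for
 * either of these.
 */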
5009
5010/* Translate a NEON load/store element instruction. Return nonzero if the
5011 instruction is invalid. */
7dcc1f89 5012static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5013{
5014 int rd, rn, rm;
5015 int op;
5016 int nregs;
5017 int interleave;
84496233 5018 int spacing;
9ee6e8bb
PB
5019 int stride;
5020 int size;
5021 int reg;
9ee6e8bb 5022 int load;
9ee6e8bb 5023 int n;
7377c2c9 5024 int vec_size;
ac55d007
RH
5025 int mmu_idx;
5026 TCGMemOp endian;
39d5492a
PM
5027 TCGv_i32 addr;
5028 TCGv_i32 tmp;
5029 TCGv_i32 tmp2;
84496233 5030 TCGv_i64 tmp64;
9ee6e8bb 5031
2c7ffc41
PM
5032 /* FIXME: this access check should not take precedence over UNDEF
5033 * for invalid encodings; we will generate incorrect syndrome information
5034 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5035 */
9dbbc748 5036 if (s->fp_excp_el) {
2c7ffc41 5037 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 5038 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5039 return 0;
5040 }
5041
5df8bac1 5042 if (!s->vfp_enabled)
9ee6e8bb
PB
5043 return 1;
5044 VFP_DREG_D(rd, insn);
5045 rn = (insn >> 16) & 0xf;
5046 rm = insn & 0xf;
5047 load = (insn & (1 << 21)) != 0;
ac55d007
RH
5048 endian = s->be_data;
5049 mmu_idx = get_mem_index(s);
9ee6e8bb
PB
5050 if ((insn & (1 << 23)) == 0) {
5051 /* Load store all elements. */
5052 op = (insn >> 8) & 0xf;
5053 size = (insn >> 6) & 3;
84496233 5054 if (op > 10)
9ee6e8bb 5055 return 1;
f2dd89d0
PM
5056 /* Catch UNDEF cases for bad values of align field */
5057 switch (op & 0xc) {
5058 case 4:
5059 if (((insn >> 5) & 1) == 1) {
5060 return 1;
5061 }
5062 break;
5063 case 8:
5064 if (((insn >> 4) & 3) == 3) {
5065 return 1;
5066 }
5067 break;
5068 default:
5069 break;
5070 }
9ee6e8bb
PB
5071 nregs = neon_ls_element_type[op].nregs;
5072 interleave = neon_ls_element_type[op].interleave;
84496233 5073 spacing = neon_ls_element_type[op].spacing;
ac55d007 5074 if (size == 3 && (interleave | spacing) != 1) {
84496233 5075 return 1;
ac55d007 5076 }
e23f12b3
RH
5077 /* For our purposes, bytes are always little-endian. */
5078 if (size == 0) {
5079 endian = MO_LE;
5080 }
5081 /* Consecutive little-endian elements from a single register
5082 * can be promoted to a larger little-endian operation.
5083 */
5084 if (interleave == 1 && endian == MO_LE) {
5085 size = 3;
5086 }
ac55d007 5087 tmp64 = tcg_temp_new_i64();
e318a60b 5088 addr = tcg_temp_new_i32();
ac55d007 5089 tmp2 = tcg_const_i32(1 << size);
dcc65026 5090 load_reg_var(s, addr, rn);
9ee6e8bb 5091 for (reg = 0; reg < nregs; reg++) {
ac55d007
RH
5092 for (n = 0; n < 8 >> size; n++) {
5093 int xs;
5094 for (xs = 0; xs < interleave; xs++) {
5095 int tt = rd + reg + spacing * xs;
5096
5097 if (load) {
5098 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
5099 neon_store_element64(tt, n, size, tmp64);
5100 } else {
5101 neon_load_element64(tmp64, tt, n, size);
5102 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
9ee6e8bb 5103 }
ac55d007 5104 tcg_gen_add_i32(addr, addr, tmp2);
9ee6e8bb
PB
5105 }
5106 }
9ee6e8bb 5107 }
e318a60b 5108 tcg_temp_free_i32(addr);
ac55d007
RH
5109 tcg_temp_free_i32(tmp2);
5110 tcg_temp_free_i64(tmp64);
5111 stride = nregs * interleave * 8;
9ee6e8bb
PB
5112 } else {
5113 size = (insn >> 10) & 3;
5114 if (size == 3) {
5115 /* Load single element to all lanes. */
8e18cde3
PM
5116 int a = (insn >> 4) & 1;
5117 if (!load) {
9ee6e8bb 5118 return 1;
8e18cde3 5119 }
9ee6e8bb
PB
5120 size = (insn >> 6) & 3;
5121 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
5122
5123 if (size == 3) {
5124 if (nregs != 4 || a == 0) {
9ee6e8bb 5125 return 1;
99c475ab 5126 }
8e18cde3
PM
5127 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5128 size = 2;
5129 }
5130 if (nregs == 1 && a == 1 && size == 0) {
5131 return 1;
5132 }
5133 if (nregs == 3 && a == 1) {
5134 return 1;
5135 }
e318a60b 5136 addr = tcg_temp_new_i32();
8e18cde3 5137 load_reg_var(s, addr, rn);
7377c2c9
RH
5138
5139 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
5140 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
5141 */
5142 stride = (insn & (1 << 5)) ? 2 : 1;
5143 vec_size = nregs == 1 ? stride * 8 : 8;
5144
5145 tmp = tcg_temp_new_i32();
5146 for (reg = 0; reg < nregs; reg++) {
5147 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
5148 s->be_data | size);
5149 if ((rd & 1) && vec_size == 16) {
5150 /* We cannot write 16 bytes at once because the
5151 * destination is unaligned.
5152 */
5153 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5154 8, 8, tmp);
5155 tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
5156 neon_reg_offset(rd, 0), 8, 8);
5157 } else {
5158 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5159 vec_size, vec_size, tmp);
8e18cde3 5160 }
7377c2c9
RH
5161 tcg_gen_addi_i32(addr, addr, 1 << size);
5162 rd += stride;
9ee6e8bb 5163 }
7377c2c9 5164 tcg_temp_free_i32(tmp);
e318a60b 5165 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5166 stride = (1 << size) * nregs;
5167 } else {
5168 /* Single element. */
93262b16 5169 int idx = (insn >> 4) & 0xf;
2d6ac920 5170 int reg_idx;
9ee6e8bb
PB
5171 switch (size) {
5172 case 0:
2d6ac920 5173 reg_idx = (insn >> 5) & 7;
9ee6e8bb
PB
5174 stride = 1;
5175 break;
5176 case 1:
2d6ac920 5177 reg_idx = (insn >> 6) & 3;
9ee6e8bb
PB
5178 stride = (insn & (1 << 5)) ? 2 : 1;
5179 break;
5180 case 2:
2d6ac920 5181 reg_idx = (insn >> 7) & 1;
9ee6e8bb
PB
5182 stride = (insn & (1 << 6)) ? 2 : 1;
5183 break;
5184 default:
5185 abort();
5186 }
5187 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
5188 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5189 switch (nregs) {
5190 case 1:
5191 if (((idx & (1 << size)) != 0) ||
5192 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5193 return 1;
5194 }
5195 break;
5196 case 3:
5197 if ((idx & 1) != 0) {
5198 return 1;
5199 }
5200 /* fall through */
5201 case 2:
5202 if (size == 2 && (idx & 2) != 0) {
5203 return 1;
5204 }
5205 break;
5206 case 4:
5207 if ((size == 2) && ((idx & 3) == 3)) {
5208 return 1;
5209 }
5210 break;
5211 default:
5212 abort();
5213 }
5214 if ((rd + stride * (nregs - 1)) > 31) {
5215 /* Attempts to write off the end of the register file
5216 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5217 * the neon_load_reg() would write off the end of the array.
5218 */
5219 return 1;
5220 }
2d6ac920 5221 tmp = tcg_temp_new_i32();
e318a60b 5222 addr = tcg_temp_new_i32();
dcc65026 5223 load_reg_var(s, addr, rn);
9ee6e8bb
PB
5224 for (reg = 0; reg < nregs; reg++) {
5225 if (load) {
2d6ac920
RH
5226 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
5227 s->be_data | size);
5228 neon_store_element(rd, reg_idx, size, tmp);
9ee6e8bb 5229 } else { /* Store */
2d6ac920
RH
5230 neon_load_element(tmp, rd, reg_idx, size);
5231 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
5232 s->be_data | size);
99c475ab 5233 }
9ee6e8bb 5234 rd += stride;
1b2b1e54 5235 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 5236 }
e318a60b 5237 tcg_temp_free_i32(addr);
2d6ac920 5238 tcg_temp_free_i32(tmp);
9ee6e8bb 5239 stride = nregs * (1 << size);
99c475ab 5240 }
9ee6e8bb
PB
5241 }
5242 if (rm != 15) {
39d5492a 5243 TCGv_i32 base;
b26eefb6
PB
5244
5245 base = load_reg(s, rn);
9ee6e8bb 5246 if (rm == 13) {
b26eefb6 5247 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 5248 } else {
39d5492a 5249 TCGv_i32 index;
b26eefb6
PB
5250 index = load_reg(s, rm);
5251 tcg_gen_add_i32(base, base, index);
7d1b0095 5252 tcg_temp_free_i32(index);
9ee6e8bb 5253 }
b26eefb6 5254 store_reg(s, rn, base);
9ee6e8bb
PB
5255 }
5256 return 0;
5257}
3b46e624 5258
39d5492a 5259static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5260{
5261 switch (size) {
5262 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5263 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5264 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5265 default: abort();
5266 }
5267}
5268
39d5492a 5269static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5270{
5271 switch (size) {
02da0b2d
PM
5272 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5273 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5274 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5275 default: abort();
5276 }
5277}
5278
static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
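/* The (size << 1) | u selector packs element size and signedness into one
 * switch value; cases 4 and 5 are the 32x32->64 multiplies, done with
 * plain TCG i64 multiplies instead of helpers.
 */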

static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
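/* The op/u pair selects among the four narrowing flavours:
 * op != 0, u != 0: saturating unsigned narrow of a signed input (the
 * VQMOVUN/VQSHRUN-style cases); op != 0, u == 0: plain truncating narrow;
 * op == 0: unsigned (u) or signed (!u) saturating narrow.
 */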

/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
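/* As for neon_2rm_sizes below, bit n of each entry is set if the insn
 * allows size value n: 0x7 permits the 8/16/32-bit sizes, 0xf also
 * permits 64-bit, and the decoder UNDEFs when
 * (neon_3r_sizes[op] & (1 << size)) is zero.
 */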

/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};


/* Expand v8.1 simd helper. */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}
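/* opr_sz is the operand size in bytes: (1 + q) * 8 is 8 for a D-register
 * (q == 0) and 16 for a Q-register (q == 1) operation.  The insn UNDEFs
 * (return 1) unless the v8.1 RDM feature is present.
 */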

/*
 * Expanders for VBitOps_VBIF, VBIT, VBSL.
 */
static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rm);
    tcg_gen_and_i64(rn, rn, rd);
    tcg_gen_xor_i64(rd, rm, rn);
}

static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_and_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_andc_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rm);
    tcg_gen_and_vec(vece, rn, rn, rd);
    tcg_gen_xor_vec(vece, rd, rm, rn);
}

static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_and_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_andc_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

const GVecGen3 bsl_op = {
    .fni8 = gen_bsl_i64,
    .fniv = gen_bsl_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

const GVecGen3 bit_op = {
    .fni8 = gen_bit_i64,
    .fniv = gen_bit_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

const GVecGen3 bif_op = {
    .fni8 = gen_bif_i64,
    .fniv = gen_bif_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};
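/* All three expanders use the standard xor/and/xor bit-select sequence:
 * for VBSL, rd = rm ^ ((rn ^ rm) & rd), so each result bit comes from rn
 * where the corresponding rd (mask) bit is set and from rm otherwise.
 * VBIT and VBIF are the same select with the mask and data roles
 * exchanged, which is why all three set .load_dest to make the old
 * destination available as an input.
 */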

static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

const GVecGen2i ssra_op[4] = {
    { .fni8 = gen_ssra8_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_8 },
    { .fni8 = gen_ssra16_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_16 },
    { .fni4 = gen_ssra32_i32,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_32 },
    { .fni8 = gen_ssra64_i64,
      .fniv = gen_ssra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_64 },
};
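/* SSRA ("signed shift right and accumulate"): each expander shifts the
 * source lanes right arithmetically and adds them into the destination,
 * hence .load_dest.  The 8- and 16-bit .fni8 variants use the
 * tcg_gen_vec_* helpers, which operate on all lanes packed into a single
 * 64-bit value, for expansions that do not use host vector ops.
 */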

static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

const GVecGen2i usra_op[4] = {
    { .fni8 = gen_usra8_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_8, },
    { .fni8 = gen_usra16_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_16, },
    { .fni4 = gen_usra32_i32,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_32, },
    { .fni8 = gen_usra64_i64,
      .fniv = gen_usra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_64, },
};

static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
        tcg_gen_shri_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

const GVecGen2i sri_op[4] = {
    { .fni8 = gen_shr8_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_8 },
    { .fni8 = gen_shr16_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_16 },
    { .fni4 = gen_shr32_ins_i32,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_32 },
    { .fni8 = gen_shr64_ins_i64,
      .fniv = gen_shr_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_64 },
};
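/* For SRI ("shift right and insert"), dup_const replicates the per-lane
 * mask 0xff >> shift (or 0xffff >> shift) across the 64-bit word; it
 * covers exactly the bits the shifted-in value can reach, so every lane
 * of the destination keeps its top 'shift' bits and takes the shifted
 * source bits below them.
 */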

static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
        tcg_gen_shli_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

const GVecGen2i sli_op[4] = {
    { .fni8 = gen_shl8_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_8 },
    { .fni8 = gen_shl16_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_16 },
    { .fni4 = gen_shl32_ins_i32,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_32 },
    { .fni8 = gen_shl64_ins_i64,
      .fniv = gen_shl_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_64 },
};
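/* SLI ("shift left and insert") mirrors SRI: each destination lane keeps
 * its low 'shift' bits instead, which is why the 32- and 64-bit cases
 * reduce to a single deposit of the source at bit position 'shift'.
 */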

static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}

/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */
const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .vece = MO_64 },
};

const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .vece = MO_64 },
};

/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .vece = MO_64 },
};
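/* Like the other NEON compares, VTST/CMTST must produce all-ones lanes
 * on a hit: the scalar expanders negate the 0/1 setcond result to get
 * 0/-1, while the vector expander relies on tcg_gen_cmp_vec, which
 * already yields all-ones/all-zeros per lane.
 */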

static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .opc = INDEX_op_usadd_vec,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .opc = INDEX_op_usadd_vec,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .opc = INDEX_op_usadd_vec,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .opc = INDEX_op_usadd_vec,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opc = INDEX_op_ssadd_vec,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opc = INDEX_op_ssadd_vec,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opc = INDEX_op_ssadd_vec,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opc = INDEX_op_ssadd_vec,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opc = INDEX_op_ussub_vec,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opc = INDEX_op_ussub_vec,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opc = INDEX_op_ussub_vec,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opc = INDEX_op_ussub_vec,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opc = INDEX_op_sssub_vec,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opc = INDEX_op_sssub_vec,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opc = INDEX_op_sssub_vec,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opc = INDEX_op_sssub_vec,
      .write_aofs = true,
      .vece = MO_64 },
};
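/* All four saturating expanders detect saturation the same way: compute
 * both the wrapping and the saturating result, compare them lane by lane
 * (NE yields all-ones in any lane that saturated), and OR the comparison
 * into 'sat', which the caller points at the QC flag (vfp.qc).
 */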

/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions. */

static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int vec_size;
    uint32_t imm;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_ptr ptr1, ptr2, ptr3;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    vec_size = q ? 16 : 8;
    rd_ofs = neon_reg_offset(rd, 0);
    rn_ofs = neon_reg_offset(rn, 0);
    rm_ofs = neon_reg_offset(rm, 0);

    if ((insn & (1 << 23)) == 0) {
        /* Three register same length. */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        switch (op) {
        case NEON_3R_SHA:
            /* The SHA-1/SHA-256 3-register instructions require special
             * treatment here, as their size field is overloaded as an
             * op type selector, and they all consume their input in a
             * single pass.
             */
            if (!q) {
                return 1;
            }
            if (!u) { /* SHA-1 */
                if (!dc_isar_feature(aa32_sha1, s)) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                tmp4 = tcg_const_i32(size);
                gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
                tcg_temp_free_i32(tmp4);
            } else { /* SHA-256 */
                if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                switch (size) {
                case 0:
                    gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
                    break;
                case 1:
                    gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
                    break;
                case 2:
                    gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
                    break;
                }
            }
            tcg_temp_free_ptr(ptr1);
            tcg_temp_free_ptr(ptr2);
            tcg_temp_free_ptr(ptr3);
            return 0;

        case NEON_3R_VPADD_VQRDMLAH:
            if (!u) {
                break;  /* VPADD */
            }
            /* VQRDMLAH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_VFM_VQRDMLSH:
            if (!u) {
                /* VFM, VFMS */
                if (size == 1) {
                    return 1;
                }
                break;
            }
            /* VQRDMLSH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_LOGIC: /* Logic ops. */
            switch ((u << 2) | size) {
            case 0: /* VAND */
                tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 1: /* VBIC */
                tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
                break;
            case 2: /* VORR */
                tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
                                vec_size, vec_size);
                break;
            case 3: /* VORN */
                tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 4: /* VEOR */
                tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 5: /* VBSL */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bsl_op);
                break;
            case 6: /* VBIT */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bit_op);
                break;
            case 7: /* VBIF */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bif_op);
                break;
            }
            return 0;

        case NEON_3R_VADD_VSUB:
            if (u) {
                tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else {
                tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            }
            return 0;

        case NEON_3R_VQADD:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqadd_op : sqadd_op) + size);
            return 0;

        case NEON_3R_VQSUB:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqsub_op : sqsub_op) + size);
            return 0;

        case NEON_3R_VMUL: /* VMUL */
            if (u) {
                /* Polynomial case allows only P8 and is handled below. */
                if (size != 0) {
                    return 1;
                }
            } else {
                tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                return 0;
            }
            break;

        case NEON_3R_VML: /* VMLA, VMLS */
            tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
                           u ? &mls_op[size] : &mla_op[size]);
            return 0;

        case NEON_3R_VTST_VCEQ:
            if (u) { /* VCEQ */
                tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else { /* VTST */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &cmtst_op[size]);
            }
            return 0;

        case NEON_3R_VCGT:
            tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VCGE:
            tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VMAX:
            if (u) {
                tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        case NEON_3R_VMIN:
            if (u) {
                tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        }

        if (size == 3) {
            /* 64-bit element instructions. */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case NEON_3R_VSHL:
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
            {
                int rtmp;
                /* Shift instruction operands are reversed. */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
            }
            break;
        case NEON_3R_VPADD_VQRDMLAH:
        case NEON_3R_VPMAX:
        case NEON_3R_VPMIN:
            pairwise = 1;
            break;
        case NEON_3R_FLOAT_ARITH:
            pairwise = (u && size < 2); /* if VPADD (float) */
            break;
        case NEON_3R_FLOAT_MINMAX:
            pairwise = u; /* if VPMIN/VPMAX (float) */
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u && size) {
                /* no encoding for U=0 C=1x */
                return 1;
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (!u) {
                return 1;
            }
            break;
        case NEON_3R_FLOAT_MISC:
            /* VMAXNM/VMINNM in ARMv8 */
            if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
                return 1;
            }
            break;
        case NEON_3R_VFM_VQRDMLSH:
            if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                return 1;
            }
            break;
        default:
            break;
        }

        if (pairwise && q) {
            /* All the pairwise insns UNDEF if Q is set */
            return 1;
        }

        for (pass = 0; pass < (q ? 4 : 2); pass++) {

            if (pairwise) {
                /* Pairwise. */
                if (pass < 1) {
                    tmp = neon_load_reg(rn, 0);
                    tmp2 = neon_load_reg(rn, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                }
            } else {
                /* Elementwise. */
                tmp = neon_load_reg(rn, pass);
                tmp2 = neon_load_reg(rm, pass);
            }
            switch (op) {
            case NEON_3R_VHADD:
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case NEON_3R_VRHADD:
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case NEON_3R_VHSUB:
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case NEON_3R_VSHL:
                GEN_NEON_INTEGER_OP(shl);
                break;
            case NEON_3R_VQSHL:
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case NEON_3R_VRSHL:
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case NEON_3R_VQRSHL:
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case NEON_3R_VABD:
                GEN_NEON_INTEGER_OP(abd);
                break;
            case NEON_3R_VABA:
                GEN_NEON_INTEGER_OP(abd);
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                gen_neon_add(size, tmp, tmp2);
                break;
            case NEON_3R_VMUL:
                /* VMUL.P8; other cases already eliminated. */
                gen_helper_neon_mul_p8(tmp, tmp, tmp2);
                break;
            case NEON_3R_VPMAX:
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case NEON_3R_VPMIN:
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPADD_VQRDMLAH:
                switch (size) {
                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                break;
            case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                switch ((u << 2) | size) {
                case 0: /* VADD */
                case 4: /* VPADD */
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    break;
                case 2: /* VSUB */
                    gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
                    break;
                default:
                    abort();
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MULTIPLY:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                if (!u) {
                    tcg_temp_free_i32(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    if (size == 0) {
                        gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_CMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_ACMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MINMAX:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MISC:
                if (u) {
                    /* VMAXNM/VMINNM */
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    if (size == 0) {
                        gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
                    }
                    tcg_temp_free_ptr(fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                    } else {
                        gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                    }
                }
                break;
            case NEON_3R_VFM_VQRDMLSH:
            {
                /* VFMA, VFMS: fused multiply-add */
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                TCGv_i32 tmp3 = neon_load_reg(rd, pass);
                if (size) {
                    /* VFMS */
                    gen_helper_vfp_negs(tmp, tmp);
                }
                gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
                tcg_temp_free_i32(tmp3);
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            default:
                abort();
            }
            tcg_temp_free_i32(tmp2);

            /* Save the result.  For elementwise operations we can put it
               straight into the destination register.  For pairwise operations
               we have to be careful to avoid clobbering the source operands. */
            if (pairwise && rd == rm) {
                neon_store_scratch(pass, tmp);
            } else {
                neon_store_reg(rd, pass, tmp);
            }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations. */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift. */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift. */
                if (op > 7) {
                    return 1;
                }
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
                if (q && ((rd | rm) & 1)) {
                    return 1;
                }
                if (!u && (op == 4 || op == 6)) {
                    return 1;
                }
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits. */
                if (op <= 4) {
                    shift = shift - (1 << (size + 3));
                }
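                /* Example: with 8-bit elements, VSHR #3 is encoded with
                 * imm6 = 16 - 3 = 0b001101; the leading 001 selects size 0,
                 * the low bits give shift = 5, and 5 - 8 leaves -3 here.
                 */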

                switch (op) {
                case 0: /* VSHR */
                    /* Right shift comes here negative. */
                    shift = -shift;
                    /* Shifts larger than the element size are architecturally
                     * valid.  Unsigned results in all zeros; signed results
                     * in all sign bits.
                     */
                    if (!u) {
                        tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
                                          MIN(shift, (8 << size) - 1),
                                          vec_size, vec_size);
                    } else if (shift >= 8 << size) {
                        tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
                    } else {
                        tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
                                          vec_size, vec_size);
                    }
                    return 0;

                case 1: /* VSRA */
                    /* Right shift comes here negative. */
                    shift = -shift;
                    /* Shifts larger than the element size are architecturally
                     * valid.  Unsigned results in all zeros; signed results
                     * in all sign bits.
                     */
                    if (!u) {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        MIN(shift, (8 << size) - 1),
                                        &ssra_op[size]);
                    } else if (shift >= 8 << size) {
                        /* rd += 0 */
                    } else {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        shift, &usra_op[size]);
                    }
                    return 0;

                case 4: /* VSRI */
                    if (!u) {
                        return 1;
                    }
                    /* Right shift comes here negative. */
                    shift = -shift;
                    /* Shift out of range leaves destination unchanged. */
                    if (shift < 8 << size) {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        shift, &sri_op[size]);
                    }
                    return 0;

                case 5: /* VSHL, VSLI */
                    if (u) { /* VSLI */
                        /* Shift out of range leaves destination unchanged. */
                        if (shift < 8 << size) {
                            tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
                                            vec_size, shift, &sli_op[size]);
                        }
                    } else { /* VSHL */
                        /* Shifts larger than the element size are
                         * architecturally valid and result in zero.
                         */
                        if (shift >= 8 << size) {
                            tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
                        } else {
                            tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
                                              vec_size, vec_size);
                        }
                    }
                    return 0;
                }

                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }

                /* To avoid excessive duplication of ops we implement shift
                 * by immediate using the variable shift operations.
                 */
                imm = dup_const(size, shift);

                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHLU */
                            gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
                                                      cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHL */
                            if (u) {
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            }
                            break;
                        default:
                            g_assert_not_reached();
                        }
                        if (op == 3) {
                            /* Accumulate. */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1. */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 6: /* VQSHLU */
                            switch (size) {
                            case 0:
                                gen_helper_neon_qshlu_s8(tmp, cpu_env,
                                                         tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_qshlu_s16(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            case 2:
                                gen_helper_neon_qshlu_s32(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            default:
                                abort();
                            }
                            break;
                        case 7: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        default:
                            g_assert_not_reached();
                        }
                        tcg_temp_free_i32(tmp2);

                        if (op == 3) {
                            /* Accumulate. */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
                int input_unsigned = (op == 8) ? !u : u;
                if (rm & 1) {
                    return 1;
                }
                shift = shift - (1 << (size + 3));
                size++;
                if (size == 3) {
                    tmp64 = tcg_const_i64(shift);
                    neon_load_reg64(cpu_V0, rm);
                    neon_load_reg64(cpu_V1, rm + 1);
                    for (pass = 0; pass < 2; pass++) {
                        TCGv_i64 in;
                        if (pass == 0) {
                            in = cpu_V0;
                        } else {
                            in = cpu_V1;
                        }
                        if (q) {
                            if (input_unsigned) {
                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
                            }
                        } else {
                            if (input_unsigned) {
                                gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
                            }
                        }
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (size == 1) {
                        imm = (uint16_t)shift;
                        imm |= imm << 16;
                    } else {
                        /* size == 2 */
                        imm = (uint32_t)shift;
                    }
                    tmp2 = tcg_const_i32(imm);
                    tmp4 = neon_load_reg(rm + 1, 0);
                    tmp5 = neon_load_reg(rm + 1, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rm, 0);
                        } else {
                            tmp = tmp4;
                        }
                        gen_neon_shift_narrow(size, tmp, tmp2, q,
                                              input_unsigned);
                        if (pass == 0) {
                            tmp3 = neon_load_reg(rm, 1);
                        } else {
                            tmp3 = tmp5;
                        }
                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                              input_unsigned);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(tmp3);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i32(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL, VMOVL */
                if (q || (rd & 1)) {
                    return 1;
                }
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register. */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour's narrow
                         * input. */
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else if (size == 1) {
                                imm = 0xffff >> (16 - shift);
                            } else {
                                /* size == 2 */
                                imm = 0xffffffff >> (32 - shift);
                            }
                            if (size < 2) {
                                imm64 = imm | (((uint64_t)imm) << 32);
                            } else {
                                imm64 = imm;
                            }
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point. */
                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
                    return 1;
                }
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (!(op & 1)) {
                        if (u)
                            gen_vfp_ulto(0, shift, 1);
                        else
                            gen_vfp_slto(0, shift, 1);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift, 1);
                        else
                            gen_vfp_tosl(0, shift, 1);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            int invert, reg_ofs, vec_size;

            if (q && (rd & 1)) {
                return 1;
            }

            op = (insn >> 8) & 0xf;
            /* One register and immediate. */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
            /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
             * We choose to not special-case this and will behave as if a
             * valid constant encoding of 0 had been given.
             */
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                if (invert) {
                    imm = ~imm;
                }
                break;
            case 15:
                if (invert) {
                    return 1;
                }
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert) {
                imm = ~imm;
            }

            reg_ofs = neon_reg_offset(rd, 0);
            vec_size = q ? 16 : 8;

            if (op & 1 && op < 12) {
                if (invert) {
                    /* The immediate value has already been inverted,
                     * so BIC becomes AND.
                     */
                    tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
                                      vec_size, vec_size);
                } else {
                    tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
                                     vec_size, vec_size);
                }
            } else {
                /* VMOV, VMVN. */
                if (op == 14 && invert) {
                    TCGv_i64 t64 = tcg_temp_new_i64();

                    for (pass = 0; pass <= q; ++pass) {
                        uint64_t val = 0;
                        int n;

                        for (n = 0; n < 8; n++) {
                            if (imm & (1 << (n + pass * 8))) {
                                val |= 0xffull << (n * 8);
                            }
                        }
                        tcg_gen_movi_i64(t64, val);
                        neon_store_reg64(t64, rd + pass);
                    }
                    tcg_temp_free_i64(t64);
                } else {
                    tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
                }
            }
        }
    } else { /* (insn & 0x00800010 == 0x00800000) */
        if (size != 3) {
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths. */
                int src1_wide;
                int src2_wide;
                int prewiden;
                /* undefreq: bit 0 : UNDEF if size == 0
                 *           bit 1 : UNDEF if size == 1
                 *           bit 2 : UNDEF if size == 2
                 *           bit 3 : UNDEF if U == 1
                 * Note that [2:0] set implies 'always UNDEF'
                 */
                int undefreq;
                /* prewiden, src1_wide, src2_wide, undefreq */
                static const int neon_3reg_wide[16][4] = {
                    {1, 0, 0, 0}, /* VADDL */
                    {1, 1, 0, 0}, /* VADDW */
                    {1, 0, 0, 0}, /* VSUBL */
                    {1, 1, 0, 0}, /* VSUBW */
                    {0, 1, 1, 0}, /* VADDHN */
                    {0, 0, 0, 0}, /* VABAL */
                    {0, 1, 1, 0}, /* VSUBHN */
                    {0, 0, 0, 0}, /* VABDL */
                    {0, 0, 0, 0}, /* VMLAL */
                    {0, 0, 0, 9}, /* VQDMLAL */
                    {0, 0, 0, 0}, /* VMLSL */
                    {0, 0, 0, 9}, /* VQDMLSL */
                    {0, 0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0, 1}, /* VQDMULL */
                    {0, 0, 0, 0xa}, /* Polynomial VMULL */
                    {0, 0, 0, 7}, /* Reserved: always UNDEF */
                };
7349
7350 prewiden = neon_3reg_wide[op][0];
7351 src1_wide = neon_3reg_wide[op][1];
7352 src2_wide = neon_3reg_wide[op][2];
695272dc 7353 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 7354
526d0096
PM
7355 if ((undefreq & (1 << size)) ||
7356 ((undefreq & 8) && u)) {
695272dc
PM
7357 return 1;
7358 }
7359 if ((src1_wide && (rn & 1)) ||
7360 (src2_wide && (rm & 1)) ||
7361 (!src2_wide && (rd & 1))) {
ad69471c 7362 return 1;
695272dc 7363 }
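 /* Example of the undefreq check above (illustrative): VQDMLAL has
  * undefreq = 9 = 0b1001, so it UNDEFs for size == 0 (bit 0) and for
  * U == 1 (bit 3); the reserved entry, 7 = 0b0111, has all three size
  * bits set and is therefore always UNDEF.
  */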
ad69471c 7364
4e624eda
PM
7365 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
7366 * outside the loop below as it only performs a single pass.
7367 */
7368 if (op == 14 && size == 2) {
7369 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
7370
962fcbf2 7371 if (!dc_isar_feature(aa32_pmull, s)) {
4e624eda
PM
7372 return 1;
7373 }
7374 tcg_rn = tcg_temp_new_i64();
7375 tcg_rm = tcg_temp_new_i64();
7376 tcg_rd = tcg_temp_new_i64();
7377 neon_load_reg64(tcg_rn, rn);
7378 neon_load_reg64(tcg_rm, rm);
7379 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
7380 neon_store_reg64(tcg_rd, rd);
7381 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
7382 neon_store_reg64(tcg_rd, rd + 1);
7383 tcg_temp_free_i64(tcg_rn);
7384 tcg_temp_free_i64(tcg_rm);
7385 tcg_temp_free_i64(tcg_rd);
7386 return 0;
7387 }
7388
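 /* Illustrative sketch of the carry-less (polynomial) multiply that
  * the pmull helpers above implement; the real helpers operate on
  * 64-bit operands, this 8-bit model only shows the principle:
  *
  *   static uint16_t pmull8(uint8_t a, uint8_t b)
  *   {
  *       uint16_t r = 0;
  *       int i;
  *       for (i = 0; i < 8; i++) {
  *           if (b & (1 << i)) {
  *               r ^= (uint16_t)a << i;   // XOR, not ADD: no carries
  *           }
  *       }
  *       return r;
  *   }
  */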
9ee6e8bb
PB
7389 /* Avoid overlapping operands. Wide source operands are
7390 always aligned so will never overlap with wide
7391 destinations in problematic ways. */
8f8e3aa4 7392 if (rd == rm && !src2_wide) {
dd8fbd78
FN
7393 tmp = neon_load_reg(rm, 1);
7394 neon_store_scratch(2, tmp);
8f8e3aa4 7395 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
7396 tmp = neon_load_reg(rn, 1);
7397 neon_store_scratch(2, tmp);
9ee6e8bb 7398 }
f764718d 7399 tmp3 = NULL;
9ee6e8bb 7400 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7401 if (src1_wide) {
7402 neon_load_reg64(cpu_V0, rn + pass);
f764718d 7403 tmp = NULL;
9ee6e8bb 7404 } else {
ad69471c 7405 if (pass == 1 && rd == rn) {
dd8fbd78 7406 tmp = neon_load_scratch(2);
9ee6e8bb 7407 } else {
ad69471c
PB
7408 tmp = neon_load_reg(rn, pass);
7409 }
7410 if (prewiden) {
7411 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
7412 }
7413 }
ad69471c
PB
7414 if (src2_wide) {
7415 neon_load_reg64(cpu_V1, rm + pass);
f764718d 7416 tmp2 = NULL;
9ee6e8bb 7417 } else {
ad69471c 7418 if (pass == 1 && rd == rm) {
dd8fbd78 7419 tmp2 = neon_load_scratch(2);
9ee6e8bb 7420 } else {
ad69471c
PB
7421 tmp2 = neon_load_reg(rm, pass);
7422 }
7423 if (prewiden) {
7424 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 7425 }
9ee6e8bb
PB
7426 }
7427 switch (op) {
7428 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 7429 gen_neon_addl(size);
9ee6e8bb 7430 break;
79b0e534 7431 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 7432 gen_neon_subl(size);
9ee6e8bb
PB
7433 break;
7434 case 5: case 7: /* VABAL, VABDL */
7435 switch ((size << 1) | u) {
ad69471c
PB
7436 case 0:
7437 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
7438 break;
7439 case 1:
7440 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
7441 break;
7442 case 2:
7443 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
7444 break;
7445 case 3:
7446 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
7447 break;
7448 case 4:
7449 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
7450 break;
7451 case 5:
7452 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
7453 break;
9ee6e8bb
PB
7454 default: abort();
7455 }
7d1b0095
PM
7456 tcg_temp_free_i32(tmp2);
7457 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7458 break;
7459 case 8: case 9: case 10: case 11: case 12: case 13:
7460 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 7461 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
7462 break;
7463 case 14: /* Polynomial VMULL */
e5ca24cb 7464 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
7465 tcg_temp_free_i32(tmp2);
7466 tcg_temp_free_i32(tmp);
e5ca24cb 7467 break;
695272dc
PM
7468 default: /* 15 is RESERVED: caught earlier */
7469 abort();
9ee6e8bb 7470 }
ebcd88ce
PM
7471 if (op == 13) {
7472 /* VQDMULL */
7473 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7474 neon_store_reg64(cpu_V0, rd + pass);
7475 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 7476 /* Accumulate. */
ebcd88ce 7477 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7478 switch (op) {
4dc064e6
PM
7479 case 10: /* VMLSL */
7480 gen_neon_negl(cpu_V0, size);
7481 /* Fall through */
7482 case 5: case 8: /* VABAL, VMLAL */
ad69471c 7483 gen_neon_addl(size);
9ee6e8bb
PB
7484 break;
7485 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 7486 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7487 if (op == 11) {
7488 gen_neon_negl(cpu_V0, size);
7489 }
ad69471c
PB
7490 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
7491 break;
9ee6e8bb
PB
7492 default:
7493 abort();
7494 }
ad69471c 7495 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7496 } else if (op == 4 || op == 6) {
7497 /* Narrowing operation. */
7d1b0095 7498 tmp = tcg_temp_new_i32();
79b0e534 7499 if (!u) {
9ee6e8bb 7500 switch (size) {
ad69471c
PB
7501 case 0:
7502 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
7503 break;
7504 case 1:
7505 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
7506 break;
7507 case 2:
7508 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 7509 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 7510 break;
9ee6e8bb
PB
7511 default: abort();
7512 }
7513 } else {
7514 switch (size) {
ad69471c
PB
7515 case 0:
7516 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
7517 break;
7518 case 1:
7519 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
7520 break;
7521 case 2:
7522 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
7523 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 7524 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 7525 break;
9ee6e8bb
PB
7526 default: abort();
7527 }
7528 }
ad69471c
PB
7529 if (pass == 0) {
7530 tmp3 = tmp;
7531 } else {
7532 neon_store_reg(rd, 0, tmp3);
7533 neon_store_reg(rd, 1, tmp);
7534 }
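 /* The two switches above, illustrated for size == 2 (illustrative):
  *   VADDHN/VSUBHN:   result = (uint32_t)(wide >> 32);
  *   VRADDHN/VRSUBHN: result = (uint32_t)((wide + 0x80000000ull) >> 32);
  * i.e. the rounding variants add half of the discarded weight first.
  */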
9ee6e8bb
PB
7535 } else {
7536 /* Write back the result. */
ad69471c 7537 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7538 }
7539 }
7540 } else {
3e3326df
PM
7541 /* Two registers and a scalar. NB that for ops of this form
7542 * the ARM ARM labels bit 24 as Q, but it is in our variable
7543 * 'u', not 'q'.
7544 */
7545 if (size == 0) {
7546 return 1;
7547 }
9ee6e8bb 7548 switch (op) {
9ee6e8bb 7549 case 1: /* Float VMLA scalar */
9ee6e8bb 7550 case 5: /* Float VMLS scalar */
9ee6e8bb 7551 case 9: /* Float VMUL scalar */
3e3326df
PM
7552 if (size == 1) {
7553 return 1;
7554 }
7555 /* fall through */
7556 case 0: /* Integer VMLA scalar */
7557 case 4: /* Integer VMLS scalar */
7558 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
7559 case 12: /* VQDMULH scalar */
7560 case 13: /* VQRDMULH scalar */
3e3326df
PM
7561 if (u && ((rd | rn) & 1)) {
7562 return 1;
7563 }
dd8fbd78
FN
7564 tmp = neon_get_scalar(size, rm);
7565 neon_store_scratch(0, tmp);
9ee6e8bb 7566 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
7567 tmp = neon_load_scratch(0);
7568 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
7569 if (op == 12) {
7570 if (size == 1) {
02da0b2d 7571 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7572 } else {
02da0b2d 7573 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
7574 }
7575 } else if (op == 13) {
7576 if (size == 1) {
02da0b2d 7577 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7578 } else {
02da0b2d 7579 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
7580 }
7581 } else if (op & 1) {
aa47cfdd
PM
7582 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7583 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
7584 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
7585 } else {
7586 switch (size) {
dd8fbd78
FN
7587 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
7588 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
7589 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 7590 default: abort();
9ee6e8bb
PB
7591 }
7592 }
7d1b0095 7593 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7594 if (op < 8) {
7595 /* Accumulate. */
dd8fbd78 7596 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
7597 switch (op) {
7598 case 0:
dd8fbd78 7599 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
7600 break;
7601 case 1:
aa47cfdd
PM
7602 {
7603 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7604 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
7605 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7606 break;
aa47cfdd 7607 }
9ee6e8bb 7608 case 4:
dd8fbd78 7609 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
7610 break;
7611 case 5:
aa47cfdd
PM
7612 {
7613 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7614 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
7615 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7616 break;
aa47cfdd 7617 }
9ee6e8bb
PB
7618 default:
7619 abort();
7620 }
7d1b0095 7621 tcg_temp_free_i32(tmp2);
9ee6e8bb 7622 }
dd8fbd78 7623 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7624 }
7625 break;
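 /* Reference model of the qrdmulh_s16 helper used above (illustrative
  * sketch only; the real helper also sets the QC flag via cpu_env on
  * saturation, and op == 12 (VQDMULH) simply omits the rounding
  * addend):
  *
  *   static int16_t qrdmulh_s16(int16_t a, int16_t b)
  *   {
  *       int64_t p = 2 * (int64_t)a * b + (1 << 15);  // double, round
  *       p >>= 16;                                    // high half
  *       if (p > INT16_MAX) p = INT16_MAX;            // saturate
  *       if (p < INT16_MIN) p = INT16_MIN;
  *       return (int16_t)p;
  *   }
  */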
9ee6e8bb 7626 case 3: /* VQDMLAL scalar */
9ee6e8bb 7627 case 7: /* VQDMLSL scalar */
9ee6e8bb 7628 case 11: /* VQDMULL scalar */
3e3326df 7629 if (u == 1) {
ad69471c 7630 return 1;
3e3326df
PM
7631 }
7632 /* fall through */
7633 case 2: /* VMLAL scalar */
7634 case 6: /* VMLSL scalar */
7635 case 10: /* VMULL scalar */
7636 if (rd & 1) {
7637 return 1;
7638 }
dd8fbd78 7639 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
7640 /* We need a copy of tmp2 because gen_neon_mull
7641 * deletes it during pass 0. */
7d1b0095 7642 tmp4 = tcg_temp_new_i32();
c6067f04 7643 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 7644 tmp3 = neon_load_reg(rn, 1);
ad69471c 7645
9ee6e8bb 7646 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7647 if (pass == 0) {
7648 tmp = neon_load_reg(rn, 0);
9ee6e8bb 7649 } else {
dd8fbd78 7650 tmp = tmp3;
c6067f04 7651 tmp2 = tmp4;
9ee6e8bb 7652 }
ad69471c 7653 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
7654 if (op != 11) {
7655 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7656 }
9ee6e8bb 7657 switch (op) {
4dc064e6
PM
7658 case 6:
7659 gen_neon_negl(cpu_V0, size);
7660 /* Fall through */
7661 case 2:
ad69471c 7662 gen_neon_addl(size);
9ee6e8bb
PB
7663 break;
7664 case 3: case 7:
ad69471c 7665 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7666 if (op == 7) {
7667 gen_neon_negl(cpu_V0, size);
7668 }
ad69471c 7669 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
7670 break;
7671 case 10:
7672 /* no-op */
7673 break;
7674 case 11:
ad69471c 7675 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
7676 break;
7677 default:
7678 abort();
7679 }
ad69471c 7680 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 7681 }
61adacc8
RH
7682 break;
7683 case 14: /* VQRDMLAH scalar */
7684 case 15: /* VQRDMLSH scalar */
7685 {
7686 NeonGenThreeOpEnvFn *fn;
dd8fbd78 7687
962fcbf2 7688 if (!dc_isar_feature(aa32_rdm, s)) {
61adacc8
RH
7689 return 1;
7690 }
7691 if (u && ((rd | rn) & 1)) {
7692 return 1;
7693 }
7694 if (op == 14) {
7695 if (size == 1) {
7696 fn = gen_helper_neon_qrdmlah_s16;
7697 } else {
7698 fn = gen_helper_neon_qrdmlah_s32;
7699 }
7700 } else {
7701 if (size == 1) {
7702 fn = gen_helper_neon_qrdmlsh_s16;
7703 } else {
7704 fn = gen_helper_neon_qrdmlsh_s32;
7705 }
7706 }
dd8fbd78 7707
61adacc8
RH
7708 tmp2 = neon_get_scalar(size, rm);
7709 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7710 tmp = neon_load_reg(rn, pass);
7711 tmp3 = neon_load_reg(rd, pass);
7712 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7713 tcg_temp_free_i32(tmp3);
7714 neon_store_reg(rd, pass, tmp);
7715 }
7716 tcg_temp_free_i32(tmp2);
7717 }
9ee6e8bb 7718 break;
61adacc8
RH
7719 default:
7720 g_assert_not_reached();
9ee6e8bb
PB
7721 }
7722 }
7723 } else { /* size == 3 */
7724 if (!u) {
7725 /* Extract. */
9ee6e8bb 7726 imm = (insn >> 8) & 0xf;
ad69471c
PB
7727
7728 if (imm > 7 && !q)
7729 return 1;
7730
52579ea1
PM
7731 if (q && ((rd | rn | rm) & 1)) {
7732 return 1;
7733 }
7734
ad69471c
PB
7735 if (imm == 0) {
7736 neon_load_reg64(cpu_V0, rn);
7737 if (q) {
7738 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 7739 }
ad69471c
PB
7740 } else if (imm == 8) {
7741 neon_load_reg64(cpu_V0, rn + 1);
7742 if (q) {
7743 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7744 }
ad69471c 7745 } else if (q) {
a7812ae4 7746 tmp64 = tcg_temp_new_i64();
ad69471c
PB
7747 if (imm < 8) {
7748 neon_load_reg64(cpu_V0, rn);
a7812ae4 7749 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
7750 } else {
7751 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 7752 neon_load_reg64(tmp64, rm);
ad69471c
PB
7753 }
7754 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 7755 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
7756 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7757 if (imm < 8) {
7758 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7759 } else {
ad69471c
PB
7760 neon_load_reg64(cpu_V1, rm + 1);
7761 imm -= 8;
9ee6e8bb 7762 }
ad69471c 7763 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
7764 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7765 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 7766 tcg_temp_free_i64(tmp64);
ad69471c 7767 } else {
a7812ae4 7768 /* BUGFIX */
ad69471c 7769 neon_load_reg64(cpu_V0, rn);
a7812ae4 7770 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 7771 neon_load_reg64(cpu_V1, rm);
a7812ae4 7772 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
7773 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7774 }
7775 neon_store_reg64(cpu_V0, rd);
7776 if (q) {
7777 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
7778 }
7779 } else if ((insn & (1 << 11)) == 0) {
7780 /* Two register misc. */
7781 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7782 size = (insn >> 18) & 3;
600b828c
PM
7783 /* UNDEF for unknown op values and bad op-size combinations */
7784 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7785 return 1;
7786 }
fe8fcf3d
PM
7787 if (neon_2rm_is_v8_op(op) &&
7788 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7789 return 1;
7790 }
fc2a9b37
PM
7791 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7792 q && ((rm | rd) & 1)) {
7793 return 1;
7794 }
9ee6e8bb 7795 switch (op) {
600b828c 7796 case NEON_2RM_VREV64:
9ee6e8bb 7797 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7798 tmp = neon_load_reg(rm, pass * 2);
7799 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7800 switch (size) {
dd8fbd78
FN
7801 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7802 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7803 case 2: /* no-op */ break;
7804 default: abort();
7805 }
dd8fbd78 7806 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7807 if (size == 2) {
dd8fbd78 7808 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7809 } else {
9ee6e8bb 7810 switch (size) {
dd8fbd78
FN
7811 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7812 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7813 default: abort();
7814 }
dd8fbd78 7815 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7816 }
7817 }
7818 break;
600b828c
PM
7819 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7820 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7821 for (pass = 0; pass < q + 1; pass++) {
7822 tmp = neon_load_reg(rm, pass * 2);
7823 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7824 tmp = neon_load_reg(rm, pass * 2 + 1);
7825 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7826 switch (size) {
7827 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7828 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7829 case 2: tcg_gen_add_i64(CPU_V001); break;
7830 default: abort();
7831 }
600b828c 7832 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7833 /* Accumulate. */
ad69471c
PB
7834 neon_load_reg64(cpu_V1, rd + pass);
7835 gen_neon_addl(size);
9ee6e8bb 7836 }
ad69471c 7837 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7838 }
7839 break;
600b828c 7840 case NEON_2RM_VTRN:
9ee6e8bb 7841 if (size == 2) {
a5a14945 7842 int n;
9ee6e8bb 7843 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7844 tmp = neon_load_reg(rm, n);
7845 tmp2 = neon_load_reg(rd, n + 1);
7846 neon_store_reg(rm, n, tmp2);
7847 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7848 }
7849 } else {
7850 goto elementwise;
7851 }
7852 break;
600b828c 7853 case NEON_2RM_VUZP:
02acedf9 7854 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7855 return 1;
9ee6e8bb
PB
7856 }
7857 break;
600b828c 7858 case NEON_2RM_VZIP:
d68a6f3a 7859 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7860 return 1;
9ee6e8bb
PB
7861 }
7862 break;
600b828c
PM
7863 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7864 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7865 if (rm & 1) {
7866 return 1;
7867 }
f764718d 7868 tmp2 = NULL;
9ee6e8bb 7869 for (pass = 0; pass < 2; pass++) {
ad69471c 7870 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7871 tmp = tcg_temp_new_i32();
600b828c
PM
7872 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7873 tmp, cpu_V0);
ad69471c
PB
7874 if (pass == 0) {
7875 tmp2 = tmp;
7876 } else {
7877 neon_store_reg(rd, 0, tmp2);
7878 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7879 }
9ee6e8bb
PB
7880 }
7881 break;
600b828c 7882 case NEON_2RM_VSHLL:
fc2a9b37 7883 if (q || (rd & 1)) {
9ee6e8bb 7884 return 1;
600b828c 7885 }
ad69471c
PB
7886 tmp = neon_load_reg(rm, 0);
7887 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7888 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7889 if (pass == 1)
7890 tmp = tmp2;
7891 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7892 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7893 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7894 }
7895 break;
600b828c 7896 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
7897 {
7898 TCGv_ptr fpst;
7899 TCGv_i32 ahp;
7900
602f6e42 7901 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
7902 q || (rm & 1)) {
7903 return 1;
7904 }
7d1b0095
PM
7905 tmp = tcg_temp_new_i32();
7906 tmp2 = tcg_temp_new_i32();
486624fc
AB
7907 fpst = get_fpstatus_ptr(true);
7908 ahp = get_ahp_flag();
60011498 7909 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
486624fc 7910 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498 7911 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
486624fc 7912 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7913 tcg_gen_shli_i32(tmp2, tmp2, 16);
7914 tcg_gen_or_i32(tmp2, tmp2, tmp);
7915 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
486624fc 7916 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498
PB
7917 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7918 neon_store_reg(rd, 0, tmp2);
7d1b0095 7919 tmp2 = tcg_temp_new_i32();
486624fc 7920 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7921 tcg_gen_shli_i32(tmp2, tmp2, 16);
7922 tcg_gen_or_i32(tmp2, tmp2, tmp);
7923 neon_store_reg(rd, 1, tmp2);
7d1b0095 7924 tcg_temp_free_i32(tmp);
486624fc
AB
7925 tcg_temp_free_i32(ahp);
7926 tcg_temp_free_ptr(fpst);
60011498 7927 break;
486624fc 7928 }
600b828c 7929 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
7930 {
7931 TCGv_ptr fpst;
7932 TCGv_i32 ahp;
602f6e42 7933 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
7934 q || (rd & 1)) {
7935 return 1;
7936 }
486624fc
AB
7937 fpst = get_fpstatus_ptr(true);
7938 ahp = get_ahp_flag();
7d1b0095 7939 tmp3 = tcg_temp_new_i32();
60011498
PB
7940 tmp = neon_load_reg(rm, 0);
7941 tmp2 = neon_load_reg(rm, 1);
7942 tcg_gen_ext16u_i32(tmp3, tmp);
486624fc 7943 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7944 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7945 tcg_gen_shri_i32(tmp3, tmp, 16);
486624fc 7946 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7947 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7948 tcg_temp_free_i32(tmp);
60011498 7949 tcg_gen_ext16u_i32(tmp3, tmp2);
486624fc 7950 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7951 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7952 tcg_gen_shri_i32(tmp3, tmp2, 16);
486624fc 7953 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7954 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7955 tcg_temp_free_i32(tmp2);
7956 tcg_temp_free_i32(tmp3);
486624fc
AB
7957 tcg_temp_free_i32(ahp);
7958 tcg_temp_free_ptr(fpst);
60011498 7959 break;
486624fc 7960 }
9d935509 7961 case NEON_2RM_AESE: case NEON_2RM_AESMC:
962fcbf2 7962 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
9d935509
AB
7963 return 1;
7964 }
1a66ac61
RH
7965 ptr1 = vfp_reg_ptr(true, rd);
7966 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
7967
7968 /* Bit 6 is the lowest opcode bit; it distinguishes between
7969 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7970 */
7971 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7972
7973 if (op == NEON_2RM_AESE) {
1a66ac61 7974 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 7975 } else {
1a66ac61 7976 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 7977 }
1a66ac61
RH
7978 tcg_temp_free_ptr(ptr1);
7979 tcg_temp_free_ptr(ptr2);
9d935509
AB
7980 tcg_temp_free_i32(tmp3);
7981 break;
f1ecb913 7982 case NEON_2RM_SHA1H:
962fcbf2 7983 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
f1ecb913
AB
7984 return 1;
7985 }
1a66ac61
RH
7986 ptr1 = vfp_reg_ptr(true, rd);
7987 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7988
1a66ac61 7989 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 7990
1a66ac61
RH
7991 tcg_temp_free_ptr(ptr1);
7992 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
7993 break;
7994 case NEON_2RM_SHA1SU1:
7995 if ((rm | rd) & 1) {
7996 return 1;
7997 }
7998 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7999 if (q) {
962fcbf2 8000 if (!dc_isar_feature(aa32_sha2, s)) {
f1ecb913
AB
8001 return 1;
8002 }
962fcbf2 8003 } else if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
8004 return 1;
8005 }
1a66ac61
RH
8006 ptr1 = vfp_reg_ptr(true, rd);
8007 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 8008 if (q) {
1a66ac61 8009 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 8010 } else {
1a66ac61 8011 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 8012 }
1a66ac61
RH
8013 tcg_temp_free_ptr(ptr1);
8014 tcg_temp_free_ptr(ptr2);
f1ecb913 8015 break;
4bf940be
RH
8016
8017 case NEON_2RM_VMVN:
8018 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
8019 break;
8020 case NEON_2RM_VNEG:
8021 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
8022 break;
8023
9ee6e8bb
PB
8024 default:
8025 elementwise:
8026 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 8027 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
8028 tcg_gen_ld_f32(cpu_F0s, cpu_env,
8029 neon_reg_offset(rm, pass));
f764718d 8030 tmp = NULL;
9ee6e8bb 8031 } else {
dd8fbd78 8032 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
8033 }
8034 switch (op) {
600b828c 8035 case NEON_2RM_VREV32:
9ee6e8bb 8036 switch (size) {
dd8fbd78
FN
8037 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8038 case 1: gen_swap_half(tmp); break;
600b828c 8039 default: abort();
9ee6e8bb
PB
8040 }
8041 break;
600b828c 8042 case NEON_2RM_VREV16:
dd8fbd78 8043 gen_rev16(tmp);
9ee6e8bb 8044 break;
600b828c 8045 case NEON_2RM_VCLS:
9ee6e8bb 8046 switch (size) {
dd8fbd78
FN
8047 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
8048 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
8049 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 8050 default: abort();
9ee6e8bb
PB
8051 }
8052 break;
600b828c 8053 case NEON_2RM_VCLZ:
9ee6e8bb 8054 switch (size) {
dd8fbd78
FN
8055 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
8056 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 8057 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 8058 default: abort();
9ee6e8bb
PB
8059 }
8060 break;
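 /* CLS vs CLZ on 8-bit lanes (illustrative): cls_s8(0b11110000) = 3,
  * counting leading copies of the sign bit with the sign bit itself
  * excluded; clz_u8(0b00010000) = 3, counting leading zero bits.
  */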
600b828c 8061 case NEON_2RM_VCNT:
dd8fbd78 8062 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 8063 break;
600b828c 8064 case NEON_2RM_VQABS:
9ee6e8bb 8065 switch (size) {
02da0b2d
PM
8066 case 0:
8067 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
8068 break;
8069 case 1:
8070 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
8071 break;
8072 case 2:
8073 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
8074 break;
600b828c 8075 default: abort();
9ee6e8bb
PB
8076 }
8077 break;
600b828c 8078 case NEON_2RM_VQNEG:
9ee6e8bb 8079 switch (size) {
02da0b2d
PM
8080 case 0:
8081 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
8082 break;
8083 case 1:
8084 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
8085 break;
8086 case 2:
8087 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
8088 break;
600b828c 8089 default: abort();
9ee6e8bb
PB
8090 }
8091 break;
600b828c 8092 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 8093 tmp2 = tcg_const_i32(0);
9ee6e8bb 8094 switch(size) {
dd8fbd78
FN
8095 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
8096 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
8097 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 8098 default: abort();
9ee6e8bb 8099 }
39d5492a 8100 tcg_temp_free_i32(tmp2);
600b828c 8101 if (op == NEON_2RM_VCLE0) {
dd8fbd78 8102 tcg_gen_not_i32(tmp, tmp);
600b828c 8103 }
9ee6e8bb 8104 break;
600b828c 8105 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 8106 tmp2 = tcg_const_i32(0);
9ee6e8bb 8107 switch(size) {
dd8fbd78
FN
8108 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
8109 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
8110 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 8111 default: abort();
9ee6e8bb 8112 }
39d5492a 8113 tcg_temp_free_i32(tmp2);
600b828c 8114 if (op == NEON_2RM_VCLT0) {
dd8fbd78 8115 tcg_gen_not_i32(tmp, tmp);
600b828c 8116 }
9ee6e8bb 8117 break;
600b828c 8118 case NEON_2RM_VCEQ0:
dd8fbd78 8119 tmp2 = tcg_const_i32(0);
9ee6e8bb 8120 switch(size) {
dd8fbd78
FN
8121 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
8122 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
8123 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 8124 default: abort();
9ee6e8bb 8125 }
39d5492a 8126 tcg_temp_free_i32(tmp2);
9ee6e8bb 8127 break;
600b828c 8128 case NEON_2RM_VABS:
9ee6e8bb 8129 switch(size) {
dd8fbd78
FN
8130 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
8131 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
8132 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 8133 default: abort();
9ee6e8bb
PB
8134 }
8135 break;
600b828c 8136 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
8137 {
8138 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 8139 tmp2 = tcg_const_i32(0);
aa47cfdd 8140 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 8141 tcg_temp_free_i32(tmp2);
aa47cfdd 8142 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8143 break;
aa47cfdd 8144 }
600b828c 8145 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
8146 {
8147 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 8148 tmp2 = tcg_const_i32(0);
aa47cfdd 8149 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 8150 tcg_temp_free_i32(tmp2);
aa47cfdd 8151 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8152 break;
aa47cfdd 8153 }
600b828c 8154 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
8155 {
8156 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 8157 tmp2 = tcg_const_i32(0);
aa47cfdd 8158 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 8159 tcg_temp_free_i32(tmp2);
aa47cfdd 8160 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8161 break;
aa47cfdd 8162 }
600b828c 8163 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
8164 {
8165 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 8166 tmp2 = tcg_const_i32(0);
aa47cfdd 8167 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 8168 tcg_temp_free_i32(tmp2);
aa47cfdd 8169 tcg_temp_free_ptr(fpstatus);
0e326109 8170 break;
aa47cfdd 8171 }
600b828c 8172 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
8173 {
8174 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 8175 tmp2 = tcg_const_i32(0);
aa47cfdd 8176 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 8177 tcg_temp_free_i32(tmp2);
aa47cfdd 8178 tcg_temp_free_ptr(fpstatus);
0e326109 8179 break;
aa47cfdd 8180 }
600b828c 8181 case NEON_2RM_VABS_F:
4373f3ce 8182 gen_vfp_abs(0);
9ee6e8bb 8183 break;
600b828c 8184 case NEON_2RM_VNEG_F:
4373f3ce 8185 gen_vfp_neg(0);
9ee6e8bb 8186 break;
600b828c 8187 case NEON_2RM_VSWP:
dd8fbd78
FN
8188 tmp2 = neon_load_reg(rd, pass);
8189 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 8190 break;
600b828c 8191 case NEON_2RM_VTRN:
dd8fbd78 8192 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 8193 switch (size) {
dd8fbd78
FN
8194 case 0: gen_neon_trn_u8(tmp, tmp2); break;
8195 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 8196 default: abort();
9ee6e8bb 8197 }
dd8fbd78 8198 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 8199 break;
34f7b0a2
WN
8200 case NEON_2RM_VRINTN:
8201 case NEON_2RM_VRINTA:
8202 case NEON_2RM_VRINTM:
8203 case NEON_2RM_VRINTP:
8204 case NEON_2RM_VRINTZ:
8205 {
8206 TCGv_i32 tcg_rmode;
8207 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8208 int rmode;
8209
8210 if (op == NEON_2RM_VRINTZ) {
8211 rmode = FPROUNDING_ZERO;
8212 } else {
8213 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
8214 }
8215
8216 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8217 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8218 cpu_env);
8219 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
8220 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8221 cpu_env);
8222 tcg_temp_free_ptr(fpstatus);
8223 tcg_temp_free_i32(tcg_rmode);
8224 break;
8225 }
2ce70625
WN
8226 case NEON_2RM_VRINTX:
8227 {
8228 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8229 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
8230 tcg_temp_free_ptr(fpstatus);
8231 break;
8232 }
901ad525
WN
8233 case NEON_2RM_VCVTAU:
8234 case NEON_2RM_VCVTAS:
8235 case NEON_2RM_VCVTNU:
8236 case NEON_2RM_VCVTNS:
8237 case NEON_2RM_VCVTPU:
8238 case NEON_2RM_VCVTPS:
8239 case NEON_2RM_VCVTMU:
8240 case NEON_2RM_VCVTMS:
8241 {
8242 bool is_signed = !extract32(insn, 7, 1);
8243 TCGv_ptr fpst = get_fpstatus_ptr(1);
8244 TCGv_i32 tcg_rmode, tcg_shift;
8245 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
8246
8247 tcg_shift = tcg_const_i32(0);
8248 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8249 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8250 cpu_env);
8251
8252 if (is_signed) {
8253 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
8254 tcg_shift, fpst);
8255 } else {
8256 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
8257 tcg_shift, fpst);
8258 }
8259
8260 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8261 cpu_env);
8262 tcg_temp_free_i32(tcg_rmode);
8263 tcg_temp_free_i32(tcg_shift);
8264 tcg_temp_free_ptr(fpst);
8265 break;
8266 }
600b828c 8267 case NEON_2RM_VRECPE:
b6d4443a
AB
8268 {
8269 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8270 gen_helper_recpe_u32(tmp, tmp, fpstatus);
8271 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8272 break;
b6d4443a 8273 }
600b828c 8274 case NEON_2RM_VRSQRTE:
c2fb418e
AB
8275 {
8276 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8277 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
8278 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8279 break;
c2fb418e 8280 }
600b828c 8281 case NEON_2RM_VRECPE_F:
b6d4443a
AB
8282 {
8283 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8284 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
8285 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8286 break;
b6d4443a 8287 }
600b828c 8288 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
8289 {
8290 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8291 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
8292 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8293 break;
c2fb418e 8294 }
600b828c 8295 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 8296 gen_vfp_sito(0, 1);
9ee6e8bb 8297 break;
600b828c 8298 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 8299 gen_vfp_uito(0, 1);
9ee6e8bb 8300 break;
600b828c 8301 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 8302 gen_vfp_tosiz(0, 1);
9ee6e8bb 8303 break;
600b828c 8304 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 8305 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
8306 break;
8307 default:
600b828c
PM
8308 /* Reserved op values were caught by the
8309 * neon_2rm_sizes[] check earlier.
8310 */
8311 abort();
9ee6e8bb 8312 }
600b828c 8313 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
8314 tcg_gen_st_f32(cpu_F0s, cpu_env,
8315 neon_reg_offset(rd, pass));
9ee6e8bb 8316 } else {
dd8fbd78 8317 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
8318 }
8319 }
8320 break;
8321 }
8322 } else if ((insn & (1 << 10)) == 0) {
8323 /* VTBL, VTBX. */
56907d77
PM
8324 int n = ((insn >> 8) & 3) + 1;
8325 if ((rn + n) > 32) {
8326 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
8327 * helper function running off the end of the register file.
8328 */
8329 return 1;
8330 }
8331 n <<= 3;
9ee6e8bb 8332 if (insn & (1 << 6)) {
8f8e3aa4 8333 tmp = neon_load_reg(rd, 0);
9ee6e8bb 8334 } else {
7d1b0095 8335 tmp = tcg_temp_new_i32();
8f8e3aa4 8336 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8337 }
8f8e3aa4 8338 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 8339 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 8340 tmp5 = tcg_const_i32(n);
e7c06c4e 8341 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 8342 tcg_temp_free_i32(tmp);
9ee6e8bb 8343 if (insn & (1 << 6)) {
8f8e3aa4 8344 tmp = neon_load_reg(rd, 1);
9ee6e8bb 8345 } else {
7d1b0095 8346 tmp = tcg_temp_new_i32();
8f8e3aa4 8347 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8348 }
8f8e3aa4 8349 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 8350 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 8351 tcg_temp_free_i32(tmp5);
e7c06c4e 8352 tcg_temp_free_ptr(ptr1);
8f8e3aa4 8353 neon_store_reg(rd, 0, tmp2);
3018f259 8354 neon_store_reg(rd, 1, tmp3);
7d1b0095 8355 tcg_temp_free_i32(tmp);
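 /* Per-byte semantics of the neon_tbl helper used above (illustrative):
  * each result byte indexes an n-byte table spanning up to four D
  * registers starting at rn; out-of-range indices give 0 for VTBL or
  * keep the old destination byte for VTBX, which is what the zero/load
  * of 'tmp' above selects:
  *
  *   static uint8_t tbl_byte(uint8_t idx, const uint8_t *table, int n,
  *                           uint8_t dflt)
  *   {
  *       return idx < n ? table[idx] : dflt;
  *   }
  */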
9ee6e8bb
PB
8356 } else if ((insn & 0x380) == 0) {
8357 /* VDUP */
32f91fb7
RH
8358 int element;
8359 TCGMemOp size;
8360
133da6aa
JR
8361 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
8362 return 1;
8363 }
9ee6e8bb 8364 if (insn & (1 << 16)) {
32f91fb7
RH
8365 size = MO_8;
8366 element = (insn >> 17) & 7;
9ee6e8bb 8367 } else if (insn & (1 << 17)) {
32f91fb7
RH
8368 size = MO_16;
8369 element = (insn >> 18) & 3;
8370 } else {
8371 size = MO_32;
8372 element = (insn >> 19) & 1;
9ee6e8bb 8373 }
32f91fb7
RH
8374 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
8375 neon_element_offset(rm, element, size),
8376 q ? 16 : 8, q ? 16 : 8);
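 /* Decode example for the VDUP (scalar) path above (illustrative):
  * imm4 (insn[19:16]) = 0b0110 has bit 17 as its lowest set bit, so
  * size = MO_16 and element = (insn >> 18) & 3 = 1: every 16-bit lane
  * of the destination receives element 1 of Dm.
  */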
9ee6e8bb
PB
8377 } else {
8378 return 1;
8379 }
8380 }
8381 }
8382 return 0;
8383}
8384
8b7209fa
RH
8385/* Advanced SIMD three registers of the same length extension.
8386 * 31 25 23 22 20 16 12 11 10 9 8 3 0
8387 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8388 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8389 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8390 */
8391static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
8392{
26c470a7
RH
8393 gen_helper_gvec_3 *fn_gvec = NULL;
8394 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
8395 int rd, rn, rm, opr_sz;
8396 int data = 0;
87732318
RH
8397 int off_rn, off_rm;
8398 bool is_long = false, q = extract32(insn, 6, 1);
8399 bool ptr_is_env = false;
8b7209fa
RH
8400
8401 if ((insn & 0xfe200f10) == 0xfc200800) {
8402 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
8403 int size = extract32(insn, 20, 1);
8404 data = extract32(insn, 23, 2); /* rot */
962fcbf2 8405 if (!dc_isar_feature(aa32_vcma, s)
5763190f 8406 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
8407 return 1;
8408 }
8409 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
8410 } else if ((insn & 0xfea00f10) == 0xfc800800) {
8411 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
8412 int size = extract32(insn, 20, 1);
8413 data = extract32(insn, 24, 1); /* rot */
962fcbf2 8414 if (!dc_isar_feature(aa32_vcma, s)
5763190f 8415 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
8416 return 1;
8417 }
8418 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
8419 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
8420 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
8421 bool u = extract32(insn, 4, 1);
962fcbf2 8422 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
8423 return 1;
8424 }
8425 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
87732318
RH
8426 } else if ((insn & 0xff300f10) == 0xfc200810) {
8427 /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
8428 int is_s = extract32(insn, 23, 1);
8429 if (!dc_isar_feature(aa32_fhm, s)) {
8430 return 1;
8431 }
8432 is_long = true;
8433 data = is_s; /* is_2 == 0 */
8434 fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
8435 ptr_is_env = true;
8b7209fa
RH
8436 } else {
8437 return 1;
8438 }
8439
87732318
RH
8440 VFP_DREG_D(rd, insn);
8441 if (rd & q) {
8442 return 1;
8443 }
8444 if (q || !is_long) {
8445 VFP_DREG_N(rn, insn);
8446 VFP_DREG_M(rm, insn);
8447 if ((rn | rm) & q & !is_long) {
8448 return 1;
8449 }
8450 off_rn = vfp_reg_offset(1, rn);
8451 off_rm = vfp_reg_offset(1, rm);
8452 } else {
8453 rn = VFP_SREG_N(insn);
8454 rm = VFP_SREG_M(insn);
8455 off_rn = vfp_reg_offset(0, rn);
8456 off_rm = vfp_reg_offset(0, rm);
8457 }
8458
8b7209fa
RH
8459 if (s->fp_excp_el) {
8460 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 8461 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8b7209fa
RH
8462 return 0;
8463 }
8464 if (!s->vfp_enabled) {
8465 return 1;
8466 }
8467
8468 opr_sz = (1 + q) * 8;
26c470a7 8469 if (fn_gvec_ptr) {
87732318
RH
8470 TCGv_ptr ptr;
8471 if (ptr_is_env) {
8472 ptr = cpu_env;
8473 } else {
8474 ptr = get_fpstatus_ptr(1);
8475 }
8476 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 8477 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
8478 if (!ptr_is_env) {
8479 tcg_temp_free_ptr(ptr);
8480 }
26c470a7 8481 } else {
87732318 8482 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
8483 opr_sz, opr_sz, data, fn_gvec);
8484 }
8b7209fa
RH
8485 return 0;
8486}
8487
638808ff
RH
8488/* Advanced SIMD two registers and a scalar extension.
8489 * 31 24 23 22 20 16 12 11 10 9 8 3 0
8490 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8491 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8492 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8493 *
8494 */
8495
8496static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
8497{
26c470a7
RH
8498 gen_helper_gvec_3 *fn_gvec = NULL;
8499 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
2cc99919 8500 int rd, rn, rm, opr_sz, data;
87732318
RH
8501 int off_rn, off_rm;
8502 bool is_long = false, q = extract32(insn, 6, 1);
8503 bool ptr_is_env = false;
638808ff
RH
8504
8505 if ((insn & 0xff000f10) == 0xfe000800) {
8506 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
2cc99919
RH
8507 int rot = extract32(insn, 20, 2);
8508 int size = extract32(insn, 23, 1);
8509 int index;
8510
962fcbf2 8511 if (!dc_isar_feature(aa32_vcma, s)) {
638808ff
RH
8512 return 1;
8513 }
2cc99919 8514 if (size == 0) {
5763190f 8515 if (!dc_isar_feature(aa32_fp16_arith, s)) {
2cc99919
RH
8516 return 1;
8517 }
8518 /* For fp16, rm is just Vm, and index is M. */
8519 rm = extract32(insn, 0, 4);
8520 index = extract32(insn, 5, 1);
8521 } else {
8522 /* For fp32, rm is the usual M:Vm, and index is 0. */
8523 VFP_DREG_M(rm, insn);
8524 index = 0;
8525 }
8526 data = (index << 2) | rot;
8527 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
8528 : gen_helper_gvec_fcmlah_idx);
26c470a7
RH
8529 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
8530 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
8531 int u = extract32(insn, 4, 1);
87732318 8532
962fcbf2 8533 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
8534 return 1;
8535 }
8536 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
8537 /* rm is just Vm, and index is M. */
8538 data = extract32(insn, 5, 1); /* index */
8539 rm = extract32(insn, 0, 4);
87732318
RH
8540 } else if ((insn & 0xffa00f10) == 0xfe000810) {
8541 /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
8542 int is_s = extract32(insn, 20, 1);
8543 int vm20 = extract32(insn, 0, 3);
8544 int vm3 = extract32(insn, 3, 1);
8545 int m = extract32(insn, 5, 1);
8546 int index;
8547
8548 if (!dc_isar_feature(aa32_fhm, s)) {
8549 return 1;
8550 }
8551 if (q) {
8552 rm = vm20;
8553 index = m * 2 + vm3;
8554 } else {
8555 rm = vm20 * 2 + m;
8556 index = vm3;
8557 }
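 /* Index decode example for VFM[AS]L above (illustrative): with q == 0,
  * vm20 = 2, vm3 = 1 and m = 1 we get rm = 2 * 2 + 1 = 5 and
  * index = 1, i.e. half-precision element 1 within single-precision
  * register slot 5.
  */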
8558 is_long = true;
8559 data = (index << 2) | is_s; /* is_2 == 0 */
8560 fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
8561 ptr_is_env = true;
638808ff
RH
8562 } else {
8563 return 1;
8564 }
8565
87732318
RH
8566 VFP_DREG_D(rd, insn);
8567 if (rd & q) {
8568 return 1;
8569 }
8570 if (q || !is_long) {
8571 VFP_DREG_N(rn, insn);
8572 if (rn & q & !is_long) {
8573 return 1;
8574 }
8575 off_rn = vfp_reg_offset(1, rn);
8576 off_rm = vfp_reg_offset(1, rm);
8577 } else {
8578 rn = VFP_SREG_N(insn);
8579 off_rn = vfp_reg_offset(0, rn);
8580 off_rm = vfp_reg_offset(0, rm);
8581 }
638808ff
RH
8582 if (s->fp_excp_el) {
8583 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 8584 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
638808ff
RH
8585 return 0;
8586 }
8587 if (!s->vfp_enabled) {
8588 return 1;
8589 }
8590
8591 opr_sz = (1 + q) * 8;
26c470a7 8592 if (fn_gvec_ptr) {
87732318
RH
8593 TCGv_ptr ptr;
8594 if (ptr_is_env) {
8595 ptr = cpu_env;
8596 } else {
8597 ptr = get_fpstatus_ptr(1);
8598 }
8599 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 8600 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
8601 if (!ptr_is_env) {
8602 tcg_temp_free_ptr(ptr);
8603 }
26c470a7 8604 } else {
87732318 8605 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
8606 opr_sz, opr_sz, data, fn_gvec);
8607 }
638808ff
RH
8608 return 0;
8609}
8610
7dcc1f89 8611static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 8612{
4b6a83fb
PM
8613 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
8614 const ARMCPRegInfo *ri;
9ee6e8bb
PB
8615
8616 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
8617
8618 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 8619 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
8620 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
8621 return 1;
8622 }
d614a513 8623 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 8624 return disas_iwmmxt_insn(s, insn);
d614a513 8625 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 8626 return disas_dsp_insn(s, insn);
c0f4af17
PM
8627 }
8628 return 1;
4b6a83fb
PM
8629 }
8630
8631 /* Otherwise treat as a generic register access */
8632 is64 = (insn & (1 << 25)) == 0;
8633 if (!is64 && ((insn & (1 << 4)) == 0)) {
8634 /* cdp */
8635 return 1;
8636 }
8637
8638 crm = insn & 0xf;
8639 if (is64) {
8640 crn = 0;
8641 opc1 = (insn >> 4) & 0xf;
8642 opc2 = 0;
8643 rt2 = (insn >> 16) & 0xf;
8644 } else {
8645 crn = (insn >> 16) & 0xf;
8646 opc1 = (insn >> 21) & 7;
8647 opc2 = (insn >> 5) & 7;
8648 rt2 = 0;
8649 }
8650 isread = (insn >> 20) & 1;
8651 rt = (insn >> 12) & 0xf;
8652
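 /* Decode example (illustrative): MRC p15, 0, r0, c1, c0, 0 -- a SCTLR
  * read -- reaches this point with cpnum = 15, is64 = 0, opc1 = 0,
  * crn = 1, crm = 0, opc2 = 0, rt = 0 and isread = 1.
  */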
60322b39 8653 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 8654 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
8655 if (ri) {
8656 /* Check access permissions */
dcbff19b 8657 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
8658 return 1;
8659 }
8660
c0f4af17 8661 if (ri->accessfn ||
d614a513 8662 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
8663 /* Emit code to perform further access permissions checks at
8664 * runtime; this may result in an exception.
c0f4af17
PM
8665 * Note that on XScale all cp0..c13 registers do an access check
8666 * call in order to handle c15_cpar.
f59df3f2
PM
8667 */
8668 TCGv_ptr tmpptr;
3f208fd7 8669 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
8670 uint32_t syndrome;
8671
8672 /* Note that since we are an implementation which takes an
8673 * exception on a trapped conditional instruction only if the
8674 * instruction passes its condition code check, we can take
8675 * advantage of the clause in the ARM ARM that allows us to set
8676 * the COND field in the instruction to 0xE in all cases.
8677 * We could fish the actual condition out of the insn (ARM)
8678 * or the condexec bits (Thumb) but it isn't necessary.
8679 */
8680 switch (cpnum) {
8681 case 14:
8682 if (is64) {
8683 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8684 isread, false);
8bcbf37c
PM
8685 } else {
8686 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8687 rt, isread, false);
8bcbf37c
PM
8688 }
8689 break;
8690 case 15:
8691 if (is64) {
8692 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8693 isread, false);
8bcbf37c
PM
8694 } else {
8695 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8696 rt, isread, false);
8bcbf37c
PM
8697 }
8698 break;
8699 default:
8700 /* ARMv8 defines that only coprocessors 14 and 15 exist,
8701 * so this can only happen if this is an ARMv7 or earlier CPU,
8702 * in which case the syndrome information won't actually be
8703 * guest visible.
8704 */
d614a513 8705 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
8706 syndrome = syn_uncategorized();
8707 break;
8708 }
8709
43bfa4a1 8710 gen_set_condexec(s);
3977ee5d 8711 gen_set_pc_im(s, s->pc - 4);
f59df3f2 8712 tmpptr = tcg_const_ptr(ri);
8bcbf37c 8713 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
8714 tcg_isread = tcg_const_i32(isread);
8715 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
8716 tcg_isread);
f59df3f2 8717 tcg_temp_free_ptr(tmpptr);
8bcbf37c 8718 tcg_temp_free_i32(tcg_syn);
3f208fd7 8719 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
8720 }
8721
4b6a83fb
PM
8722 /* Handle special cases first */
8723 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
8724 case ARM_CP_NOP:
8725 return 0;
8726 case ARM_CP_WFI:
8727 if (isread) {
8728 return 1;
8729 }
eaed129d 8730 gen_set_pc_im(s, s->pc);
dcba3a8d 8731 s->base.is_jmp = DISAS_WFI;
2bee5105 8732 return 0;
4b6a83fb
PM
8733 default:
8734 break;
8735 }
8736
c5a49c63 8737 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8738 gen_io_start();
8739 }
8740
4b6a83fb
PM
8741 if (isread) {
8742 /* Read */
8743 if (is64) {
8744 TCGv_i64 tmp64;
8745 TCGv_i32 tmp;
8746 if (ri->type & ARM_CP_CONST) {
8747 tmp64 = tcg_const_i64(ri->resetvalue);
8748 } else if (ri->readfn) {
8749 TCGv_ptr tmpptr;
4b6a83fb
PM
8750 tmp64 = tcg_temp_new_i64();
8751 tmpptr = tcg_const_ptr(ri);
8752 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8753 tcg_temp_free_ptr(tmpptr);
8754 } else {
8755 tmp64 = tcg_temp_new_i64();
8756 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8757 }
8758 tmp = tcg_temp_new_i32();
ecc7b3aa 8759 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
8760 store_reg(s, rt, tmp);
8761 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 8762 tmp = tcg_temp_new_i32();
ecc7b3aa 8763 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 8764 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
8765 store_reg(s, rt2, tmp);
8766 } else {
39d5492a 8767 TCGv_i32 tmp;
4b6a83fb
PM
8768 if (ri->type & ARM_CP_CONST) {
8769 tmp = tcg_const_i32(ri->resetvalue);
8770 } else if (ri->readfn) {
8771 TCGv_ptr tmpptr;
4b6a83fb
PM
8772 tmp = tcg_temp_new_i32();
8773 tmpptr = tcg_const_ptr(ri);
8774 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8775 tcg_temp_free_ptr(tmpptr);
8776 } else {
8777 tmp = load_cpu_offset(ri->fieldoffset);
8778 }
8779 if (rt == 15) {
8780 /* Destination register of r15 for 32 bit loads sets
8781 * the condition codes from the high 4 bits of the value
8782 */
8783 gen_set_nzcv(tmp);
8784 tcg_temp_free_i32(tmp);
8785 } else {
8786 store_reg(s, rt, tmp);
8787 }
8788 }
8789 } else {
8790 /* Write */
8791 if (ri->type & ARM_CP_CONST) {
8792 /* If not forbidden by access permissions, treat as WI */
8793 return 0;
8794 }
8795
8796 if (is64) {
39d5492a 8797 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
8798 TCGv_i64 tmp64 = tcg_temp_new_i64();
8799 tmplo = load_reg(s, rt);
8800 tmphi = load_reg(s, rt2);
8801 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8802 tcg_temp_free_i32(tmplo);
8803 tcg_temp_free_i32(tmphi);
8804 if (ri->writefn) {
8805 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
8806 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8807 tcg_temp_free_ptr(tmpptr);
8808 } else {
8809 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8810 }
8811 tcg_temp_free_i64(tmp64);
8812 } else {
8813 if (ri->writefn) {
39d5492a 8814 TCGv_i32 tmp;
4b6a83fb 8815 TCGv_ptr tmpptr;
4b6a83fb
PM
8816 tmp = load_reg(s, rt);
8817 tmpptr = tcg_const_ptr(ri);
8818 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8819 tcg_temp_free_ptr(tmpptr);
8820 tcg_temp_free_i32(tmp);
8821 } else {
39d5492a 8822 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
8823 store_cpu_offset(tmp, ri->fieldoffset);
8824 }
8825 }
2452731c
PM
8826 }
8827
c5a49c63 8828 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8829 /* I/O operations must end the TB here (whether read or write) */
8830 gen_io_end();
8831 gen_lookup_tb(s);
8832 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
8833 /* We default to ending the TB on a coprocessor register write,
8834 * but allow this to be suppressed by the register definition
8835 * (usually only necessary to work around guest bugs).
8836 */
2452731c 8837 gen_lookup_tb(s);
4b6a83fb 8838 }
2452731c 8839
4b6a83fb
PM
8840 return 0;
8841 }
8842
626187d8
PM
8843 /* Unknown register; this might be a guest error or a QEMU
8844 * unimplemented feature.
8845 */
8846 if (is64) {
8847 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8848 "64 bit system register cp:%d opc1: %d crm:%d "
8849 "(%s)\n",
8850 isread ? "read" : "write", cpnum, opc1, crm,
8851 s->ns ? "non-secure" : "secure");
626187d8
PM
8852 } else {
8853 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8854 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8855 "(%s)\n",
8856 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8857 s->ns ? "non-secure" : "secure");
626187d8
PM
8858 }
8859
4a9a539f 8860 return 1;
9ee6e8bb
PB
8861}
8862
5e3f878a
PB
8863
8864/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 8865static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 8866{
39d5492a 8867 TCGv_i32 tmp;
7d1b0095 8868 tmp = tcg_temp_new_i32();
ecc7b3aa 8869 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 8870 store_reg(s, rlow, tmp);
7d1b0095 8871 tmp = tcg_temp_new_i32();
5e3f878a 8872 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 8873 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
8874 store_reg(s, rhigh, tmp);
8875}
8876
8877/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 8878static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 8879{
a7812ae4 8880 TCGv_i64 tmp;
39d5492a 8881 TCGv_i32 tmp2;
5e3f878a 8882
36aa55dc 8883 /* Load value and extend to 64 bits. */
a7812ae4 8884 tmp = tcg_temp_new_i64();
5e3f878a
PB
8885 tmp2 = load_reg(s, rlow);
8886 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 8887 tcg_temp_free_i32(tmp2);
5e3f878a 8888 tcg_gen_add_i64(val, val, tmp);
b75263d6 8889 tcg_temp_free_i64(tmp);
5e3f878a
PB
8890}
8891
8892/* load and add a 64-bit value from a register pair. */
a7812ae4 8893static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 8894{
a7812ae4 8895 TCGv_i64 tmp;
39d5492a
PM
8896 TCGv_i32 tmpl;
8897 TCGv_i32 tmph;
5e3f878a
PB
8898
8899 /* Load 64-bit value rd:rn. */
36aa55dc
PB
8900 tmpl = load_reg(s, rlow);
8901 tmph = load_reg(s, rhigh);
a7812ae4 8902 tmp = tcg_temp_new_i64();
36aa55dc 8903 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
8904 tcg_temp_free_i32(tmpl);
8905 tcg_temp_free_i32(tmph);
5e3f878a 8906 tcg_gen_add_i64(val, val, tmp);
b75263d6 8907 tcg_temp_free_i64(tmp);
5e3f878a
PB
8908}
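/* Conceptually, the accumulate helpers above compute (illustrative):
 *   gen_addq_lo: val += (uint64_t)reg[rlow];
 *   gen_addq:    val += (uint64_t)reg[rlow] | ((uint64_t)reg[rhigh] << 32);
 */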
8909
c9f10124 8910/* Set N and Z flags from hi|lo. */
39d5492a 8911static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 8912{
c9f10124
RH
8913 tcg_gen_mov_i32(cpu_NF, hi);
8914 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
8915}
8916
426f5abc
PB
8917/* Load/Store exclusive instructions are implemented by remembering
8918 the value/address loaded, and seeing if these are the same
354161b3 8919 when the store is performed. This should be sufficient to implement
426f5abc 8920 the architecturally mandated semantics, and avoids having to monitor
354161b3
EC
8921 regular stores. The compare vs the remembered value is done during
8922 the cmpxchg operation, but we must compare the addresses manually. */
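/* Conceptual model of the exclusive pair below (illustrative):
 *
 *   load-exclusive:   exclusive_addr = addr; exclusive_val = [addr];
 *   store-exclusive:  succeeds (Rd = 0) only if addr still equals
 *                     exclusive_addr and [addr] still equals
 *                     exclusive_val, the latter checked atomically by
 *                     the cmpxchg, the former by an explicit branch.
 */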
426f5abc 8923static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 8924 TCGv_i32 addr, int size)
426f5abc 8925{
94ee24e7 8926 TCGv_i32 tmp = tcg_temp_new_i32();
354161b3 8927 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 8928
50225ad0
PM
8929 s->is_ldex = true;
8930
426f5abc 8931 if (size == 3) {
39d5492a 8932 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 8933 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 8934
3448d47b
PM
8935 /* For AArch32, architecturally the 32-bit word at the lowest
8936 * address is always Rt and the one at addr+4 is Rt2, even if
8937 * the CPU is big-endian. That means we don't want to do a
8938 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8939 * for an architecturally 64-bit access, but instead do a
8940 * 64-bit access using MO_BE if appropriate and then split
8941 * the two halves.
8942 * This only makes a difference for BE32 user-mode, where
8943 * frob64() must not flip the two halves of the 64-bit data
8944 * but this code must treat BE32 user-mode like BE32 system.
8945 */
8946 TCGv taddr = gen_aa32_addr(s, addr, opc);
8947
8948 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8949 tcg_temp_free(taddr);
354161b3 8950 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3448d47b
PM
8951 if (s->be_data == MO_BE) {
8952 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8953 } else {
8954 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8955 }
354161b3
EC
8956 tcg_temp_free_i64(t64);
8957
8958 store_reg(s, rt2, tmp2);
03d05e2d 8959 } else {
354161b3 8960 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 8961 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 8962 }
03d05e2d
PM
8963
8964 store_reg(s, rt, tmp);
8965 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
8966}
8967
8968static void gen_clrex(DisasContext *s)
8969{
03d05e2d 8970 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
8971}
8972
426f5abc 8973static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 8974 TCGv_i32 addr, int size)
426f5abc 8975{
354161b3
EC
8976 TCGv_i32 t0, t1, t2;
8977 TCGv_i64 extaddr;
8978 TCGv taddr;
42a268c2
RH
8979 TCGLabel *done_label;
8980 TCGLabel *fail_label;
354161b3 8981 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
8982
8983 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
8984 [addr] = {Rt};
8985 {Rd} = 0;
8986 } else {
8987 {Rd} = 1;
8988 } */
8989 fail_label = gen_new_label();
8990 done_label = gen_new_label();
03d05e2d
PM
8991 extaddr = tcg_temp_new_i64();
8992 tcg_gen_extu_i32_i64(extaddr, addr);
8993 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
8994 tcg_temp_free_i64(extaddr);
8995
354161b3
EC
8996 taddr = gen_aa32_addr(s, addr, opc);
8997 t0 = tcg_temp_new_i32();
8998 t1 = load_reg(s, rt);
426f5abc 8999 if (size == 3) {
354161b3
EC
9000 TCGv_i64 o64 = tcg_temp_new_i64();
9001 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 9002
354161b3 9003 t2 = load_reg(s, rt2);
3448d47b
PM
9004 /* For AArch32, architecturally the 32-bit word at the lowest
9005 * address is always Rt and the one at addr+4 is Rt2, even if
9006 * the CPU is big-endian. Since we're going to treat this as a
9007 * single 64-bit BE store, we need to put the two halves in the
9008 * opposite order for BE to LE, so that they end up in the right
9009 * places.
9010 * We don't want gen_aa32_frob64() because that does the wrong
9011 * thing for BE32 usermode.
9012 */
9013 if (s->be_data == MO_BE) {
9014 tcg_gen_concat_i32_i64(n64, t2, t1);
9015 } else {
9016 tcg_gen_concat_i32_i64(n64, t1, t2);
9017 }
354161b3 9018 tcg_temp_free_i32(t2);
03d05e2d 9019
354161b3
EC
9020 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
9021 get_mem_index(s), opc);
9022 tcg_temp_free_i64(n64);
9023
354161b3
EC
9024 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
9025 tcg_gen_extrl_i64_i32(t0, o64);
9026
9027 tcg_temp_free_i64(o64);
9028 } else {
9029 t2 = tcg_temp_new_i32();
9030 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
9031 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
9032 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
9033 tcg_temp_free_i32(t2);
426f5abc 9034 }
354161b3
EC
9035 tcg_temp_free_i32(t1);
9036 tcg_temp_free(taddr);
9037 tcg_gen_mov_i32(cpu_R[rd], t0);
9038 tcg_temp_free_i32(t0);
426f5abc 9039 tcg_gen_br(done_label);
354161b3 9040
426f5abc
PB
9041 gen_set_label(fail_label);
9042 tcg_gen_movi_i32(cpu_R[rd], 1);
9043 gen_set_label(done_label);
03d05e2d 9044 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 9045}
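/* Editor's note: a minimal, standalone sketch (not QEMU code) of the
 * exclusive-monitor emulation strategy described above: remember the
 * address and data at LDREX time, then let a single compare-and-swap
 * perform the data check at STREX time while the address is compared
 * manually.  All names below are hypothetical, the sketch is
 * single-threaded-CPU only, and the GCC __atomic builtins stand in
 * for the TCG cmpxchg op.
 */
#if 0 /* illustrative only, not part of this file */
#include <stdbool.h>
#include <stdint.h>

static uintptr_t excl_addr = (uintptr_t)-1; /* remembered address */
static uint32_t  excl_val;                  /* remembered data */

static uint32_t sketch_ldrex(uint32_t *p)
{
    excl_addr = (uintptr_t)p;
    excl_val = *p;              /* remember what was loaded */
    return excl_val;
}

/* Returns 0 on success, 1 on failure, like STREX's result in Rd. */
static int sketch_strex(uint32_t *p, uint32_t newval)
{
    bool ok;

    if ((uintptr_t)p != excl_addr) {
        return 1;               /* the manual address comparison */
    }
    /* data comparison and store folded into one atomic cmpxchg */
    ok = __atomic_compare_exchange_n(p, &excl_val, newval, false,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    excl_addr = (uintptr_t)-1;  /* monitor is cleared either way */
    return ok ? 0 : 1;
}
#endif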
426f5abc 9046
81465888
PM
9047/* gen_srs:
9048 * @env: CPUARMState
9049 * @s: DisasContext
9050 * @mode: mode field from insn (which stack to store to)
9051 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
9052 * @writeback: true if writeback bit set
9053 *
9054 * Generate code for the SRS (Store Return State) insn.
9055 */
9056static void gen_srs(DisasContext *s,
9057 uint32_t mode, uint32_t amode, bool writeback)
9058{
9059 int32_t offset;
cbc0326b
PM
9060 TCGv_i32 addr, tmp;
9061 bool undef = false;
9062
9063 /* SRS is:
9064 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 9065 * and specified mode is monitor mode
cbc0326b
PM
9066 * - UNDEFINED in Hyp mode
9067 * - UNPREDICTABLE in User or System mode
9068 * - UNPREDICTABLE if the specified mode is:
9069 * -- not implemented
9070 * -- not a valid mode number
9071 * -- a mode that's at a higher exception level
9072 * -- Monitor, if we are Non-secure
f01377f5 9073 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 9074 */
ba63cf47 9075 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
9076 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
9077 return;
9078 }
9079
9080 if (s->current_el == 0 || s->current_el == 2) {
9081 undef = true;
9082 }
9083
9084 switch (mode) {
9085 case ARM_CPU_MODE_USR:
9086 case ARM_CPU_MODE_FIQ:
9087 case ARM_CPU_MODE_IRQ:
9088 case ARM_CPU_MODE_SVC:
9089 case ARM_CPU_MODE_ABT:
9090 case ARM_CPU_MODE_UND:
9091 case ARM_CPU_MODE_SYS:
9092 break;
9093 case ARM_CPU_MODE_HYP:
9094 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
9095 undef = true;
9096 }
9097 break;
9098 case ARM_CPU_MODE_MON:
9099 /* No need to check specifically for "are we non-secure" because
9100 * we've already made EL0 UNDEF and handled the trap for S-EL1;
9101 * so if this isn't EL3 then we must be non-secure.
9102 */
9103 if (s->current_el != 3) {
9104 undef = true;
9105 }
9106 break;
9107 default:
9108 undef = true;
9109 }
9110
9111 if (undef) {
9112 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9113 default_exception_el(s));
9114 return;
9115 }
9116
9117 addr = tcg_temp_new_i32();
9118 tmp = tcg_const_i32(mode);
f01377f5
PM
9119 /* get_r13_banked() will raise an exception if called from System mode */
9120 gen_set_condexec(s);
9121 gen_set_pc_im(s, s->pc - 4);
81465888
PM
9122 gen_helper_get_r13_banked(addr, cpu_env, tmp);
9123 tcg_temp_free_i32(tmp);
9124 switch (amode) {
9125 case 0: /* DA */
9126 offset = -4;
9127 break;
9128 case 1: /* IA */
9129 offset = 0;
9130 break;
9131 case 2: /* DB */
9132 offset = -8;
9133 break;
9134 case 3: /* IB */
9135 offset = 4;
9136 break;
9137 default:
9138 abort();
9139 }
9140 tcg_gen_addi_i32(addr, addr, offset);
9141 tmp = load_reg(s, 14);
12dcc321 9142 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9143 tcg_temp_free_i32(tmp);
81465888
PM
9144 tmp = load_cpu_field(spsr);
9145 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 9146 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9147 tcg_temp_free_i32(tmp);
81465888
PM
9148 if (writeback) {
9149 switch (amode) {
9150 case 0:
9151 offset = -8;
9152 break;
9153 case 1:
9154 offset = 4;
9155 break;
9156 case 2:
9157 offset = -4;
9158 break;
9159 case 3:
9160 offset = 0;
9161 break;
9162 default:
9163 abort();
9164 }
9165 tcg_gen_addi_i32(addr, addr, offset);
9166 tmp = tcg_const_i32(mode);
9167 gen_helper_set_r13_banked(cpu_env, tmp, addr);
9168 tcg_temp_free_i32(tmp);
9169 }
9170 tcg_temp_free_i32(addr);
dcba3a8d 9171 s->base.is_jmp = DISAS_UPDATE;
81465888
PM
9172}
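/* Editor's note: the two amode switches above can be read as one table
 * (addresses relative to the banked SP before the store; derived from
 * the offsets in the code, worth double-checking against the ARM ARM):
 *
 *   amode  name   LR stored at   SPSR stored at   SP after writeback
 *     0     DA       SP - 4          SP               SP - 8
 *     1     IA       SP              SP + 4           SP + 8
 *     2     DB       SP - 8          SP - 4           SP - 8
 *     3     IB       SP + 4          SP + 8           SP + 8
 */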
9173
c2d9644e
RK
9174/* Generate a label used for skipping this instruction */
9175static void arm_gen_condlabel(DisasContext *s)
9176{
9177 if (!s->condjmp) {
9178 s->condlabel = gen_new_label();
9179 s->condjmp = 1;
9180 }
9181}
9182
9183/* Skip this instruction if the ARM condition is false */
9184static void arm_skip_unless(DisasContext *s, uint32_t cond)
9185{
9186 arm_gen_condlabel(s);
9187 arm_gen_test_cc(cond ^ 1, s->condlabel);
9188}
9189
f4df2210 9190static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 9191{
f4df2210 9192 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
9193 TCGv_i32 tmp;
9194 TCGv_i32 tmp2;
9195 TCGv_i32 tmp3;
9196 TCGv_i32 addr;
a7812ae4 9197 TCGv_i64 tmp64;
9ee6e8bb 9198
e13886e3
PM
9199 /* M variants do not implement ARM mode; this must raise the INVSTATE
9200 * UsageFault exception.
9201 */
b53d8923 9202 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
9203 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
9204 default_exception_el(s));
9205 return;
b53d8923 9206 }
9ee6e8bb
PB
9207 cond = insn >> 28;
9208 if (cond == 0xf){
be5e7a76
DES
9209 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
9210 * choose to UNDEF. In ARMv5 and above the space is used
9211 * for miscellaneous unconditional instructions.
9212 */
9213 ARCH(5);
9214
9ee6e8bb
PB
9215 /* Unconditional instructions. */
9216 if (((insn >> 25) & 7) == 1) {
9217 /* NEON Data processing. */
d614a513 9218 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 9219 goto illegal_op;
d614a513 9220 }
9ee6e8bb 9221
7dcc1f89 9222 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 9223 goto illegal_op;
7dcc1f89 9224 }
9ee6e8bb
PB
9225 return;
9226 }
9227 if ((insn & 0x0f100000) == 0x04000000) {
9228 /* NEON load/store. */
d614a513 9229 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 9230 goto illegal_op;
d614a513 9231 }
9ee6e8bb 9232
7dcc1f89 9233 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 9234 goto illegal_op;
7dcc1f89 9235 }
9ee6e8bb
PB
9236 return;
9237 }
6a57f3eb
WN
9238 if ((insn & 0x0f000e10) == 0x0e000a00) {
9239 /* VFP. */
7dcc1f89 9240 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9241 goto illegal_op;
9242 }
9243 return;
9244 }
3d185e5d
PM
9245 if (((insn & 0x0f30f000) == 0x0510f000) ||
9246 ((insn & 0x0f30f010) == 0x0710f000)) {
9247 if ((insn & (1 << 22)) == 0) {
9248 /* PLDW; v7MP */
d614a513 9249 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
9250 goto illegal_op;
9251 }
9252 }
9253 /* Otherwise PLD; v5TE+ */
be5e7a76 9254 ARCH(5TE);
3d185e5d
PM
9255 return;
9256 }
9257 if (((insn & 0x0f70f000) == 0x0450f000) ||
9258 ((insn & 0x0f70f010) == 0x0650f000)) {
9259 ARCH(7);
9260 return; /* PLI; V7 */
9261 }
9262 if (((insn & 0x0f700000) == 0x04100000) ||
9263 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 9264 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
9265 goto illegal_op;
9266 }
9267 return; /* v7MP: Unallocated memory hint: must NOP */
9268 }
9269
9270 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
9271 ARCH(6);
9272 /* setend */
9886ecdf
PB
9273 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
9274 gen_helper_setend(cpu_env);
dcba3a8d 9275 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
9276 }
9277 return;
9278 } else if ((insn & 0x0fffff00) == 0x057ff000) {
9279 switch ((insn >> 4) & 0xf) {
9280 case 1: /* clrex */
9281 ARCH(6K);
426f5abc 9282 gen_clrex(s);
9ee6e8bb
PB
9283 return;
9284 case 4: /* dsb */
9285 case 5: /* dmb */
9ee6e8bb 9286 ARCH(7);
61e4c432 9287 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 9288 return;
6df99dec
SS
9289 case 6: /* isb */
9290 /* We need to break the TB after this insn to execute
9291 * self-modifying code correctly and also to take
9292 * any pending interrupts immediately.
9293 */
0b609cc1 9294 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 9295 return;
9888bd1e
RH
9296 case 7: /* sb */
9297 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
9298 goto illegal_op;
9299 }
9300 /*
9301 * TODO: There is no speculation barrier opcode
9302 * for TCG; MB and end the TB instead.
9303 */
9304 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9305 gen_goto_tb(s, 0, s->pc & ~1);
9306 return;
9ee6e8bb
PB
9307 default:
9308 goto illegal_op;
9309 }
9310 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
9311 /* srs */
81465888
PM
9312 ARCH(6);
9313 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 9314 return;
ea825eee 9315 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 9316 /* rfe */
c67b6b71 9317 int32_t offset;
9ee6e8bb
PB
9318 if (IS_USER(s))
9319 goto illegal_op;
9320 ARCH(6);
9321 rn = (insn >> 16) & 0xf;
b0109805 9322 addr = load_reg(s, rn);
9ee6e8bb
PB
9323 i = (insn >> 23) & 3;
9324 switch (i) {
b0109805 9325 case 0: offset = -4; break; /* DA */
c67b6b71
FN
9326 case 1: offset = 0; break; /* IA */
9327 case 2: offset = -8; break; /* DB */
b0109805 9328 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
9329 default: abort();
9330 }
9331 if (offset)
b0109805
PB
9332 tcg_gen_addi_i32(addr, addr, offset);
9333 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 9334 tmp = tcg_temp_new_i32();
12dcc321 9335 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9336 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9337 tmp2 = tcg_temp_new_i32();
12dcc321 9338 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9339 if (insn & (1 << 21)) {
9340 /* Base writeback. */
9341 switch (i) {
b0109805 9342 case 0: offset = -8; break;
c67b6b71
FN
9343 case 1: offset = 4; break;
9344 case 2: offset = -4; break;
b0109805 9345 case 3: offset = 0; break;
9ee6e8bb
PB
9346 default: abort();
9347 }
9348 if (offset)
b0109805
PB
9349 tcg_gen_addi_i32(addr, addr, offset);
9350 store_reg(s, rn, addr);
9351 } else {
7d1b0095 9352 tcg_temp_free_i32(addr);
9ee6e8bb 9353 }
b0109805 9354 gen_rfe(s, tmp, tmp2);
c67b6b71 9355 return;
9ee6e8bb
PB
9356 } else if ((insn & 0x0e000000) == 0x0a000000) {
9357 /* branch link and change to thumb (blx <offset>) */
9358 int32_t offset;
9359
9360 val = (uint32_t)s->pc;
7d1b0095 9361 tmp = tcg_temp_new_i32();
d9ba4830
PB
9362 tcg_gen_movi_i32(tmp, val);
9363 store_reg(s, 14, tmp);
9ee6e8bb
PB
9364 /* Sign-extend the 24-bit offset */
9365 offset = (((int32_t)insn) << 8) >> 8;
9366 /* offset * 4 + bit24 * 2 + (thumb bit) */
9367 val += (offset << 2) | ((insn >> 23) & 2) | 1;
9368 /* pipeline offset */
9369 val += 4;
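            /* Editor's note: worked example (hypothetical encoding).
             * For insn = 0xFA000004 (BLX, imm24 = 4, H bit clear),
             * offset = 4, so val = (addr_of_insn + 4) + (4 << 2 | 1) + 4
             * = addr_of_insn + 24 + 1: the target is PC + 16 with bit 0
             * set to enter Thumb state, assuming s->pc has already been
             * advanced past this insn as the "pipeline offset" implies.
             */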
be5e7a76 9370 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 9371 gen_bx_im(s, val);
9ee6e8bb
PB
9372 return;
9373 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 9374 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 9375 /* iWMMXt register transfer. */
c0f4af17 9376 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 9377 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 9378 return;
c0f4af17
PM
9379 }
9380 }
9ee6e8bb 9381 }
8b7209fa
RH
9382 } else if ((insn & 0x0e000a00) == 0x0c000800
9383 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9384 if (disas_neon_insn_3same_ext(s, insn)) {
9385 goto illegal_op;
9386 }
9387 return;
638808ff
RH
9388 } else if ((insn & 0x0f000a00) == 0x0e000800
9389 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9390 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
9391 goto illegal_op;
9392 }
9393 return;
9ee6e8bb
PB
9394 } else if ((insn & 0x0fe00000) == 0x0c400000) {
9395 /* Coprocessor double register transfer. */
be5e7a76 9396 ARCH(5TE);
9ee6e8bb
PB
9397 } else if ((insn & 0x0f000010) == 0x0e000010) {
9398 /* Additional coprocessor register transfer. */
7997d92f 9399 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
9400 uint32_t mask;
9401 uint32_t val;
9402 /* cps (privileged) */
9403 if (IS_USER(s))
9404 return;
9405 mask = val = 0;
9406 if (insn & (1 << 19)) {
9407 if (insn & (1 << 8))
9408 mask |= CPSR_A;
9409 if (insn & (1 << 7))
9410 mask |= CPSR_I;
9411 if (insn & (1 << 6))
9412 mask |= CPSR_F;
9413 if (insn & (1 << 18))
9414 val |= mask;
9415 }
7997d92f 9416 if (insn & (1 << 17)) {
9ee6e8bb
PB
9417 mask |= CPSR_M;
9418 val |= (insn & 0x1f);
9419 }
9420 if (mask) {
2fbac54b 9421 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
9422 }
9423 return;
9424 }
9425 goto illegal_op;
9426 }
9427 if (cond != 0xe) {
 9428 /* if the condition is not "always", generate a conditional jump to
 9429 the next instruction */
c2d9644e 9430 arm_skip_unless(s, cond);
9ee6e8bb
PB
9431 }
9432 if ((insn & 0x0f900000) == 0x03000000) {
9433 if ((insn & (1 << 21)) == 0) {
9434 ARCH(6T2);
9435 rd = (insn >> 12) & 0xf;
9436 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9437 if ((insn & (1 << 22)) == 0) {
9438 /* MOVW */
7d1b0095 9439 tmp = tcg_temp_new_i32();
5e3f878a 9440 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
9441 } else {
9442 /* MOVT */
5e3f878a 9443 tmp = load_reg(s, rd);
86831435 9444 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9445 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 9446 }
5e3f878a 9447 store_reg(s, rd, tmp);
9ee6e8bb
PB
9448 } else {
9449 if (((insn >> 12) & 0xf) != 0xf)
9450 goto illegal_op;
9451 if (((insn >> 16) & 0xf) == 0) {
9452 gen_nop_hint(s, insn & 0xff);
9453 } else {
9454 /* CPSR = immediate */
9455 val = insn & 0xff;
9456 shift = ((insn >> 8) & 0xf) * 2;
9457 if (shift)
9458 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 9459 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
9460 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
9461 i, val)) {
9ee6e8bb 9462 goto illegal_op;
7dcc1f89 9463 }
9ee6e8bb
PB
9464 }
9465 }
9466 } else if ((insn & 0x0f900000) == 0x01000000
9467 && (insn & 0x00000090) != 0x00000090) {
9468 /* miscellaneous instructions */
9469 op1 = (insn >> 21) & 3;
9470 sh = (insn >> 4) & 0xf;
9471 rm = insn & 0xf;
9472 switch (sh) {
8bfd0550
PM
9473 case 0x0: /* MSR, MRS */
9474 if (insn & (1 << 9)) {
9475 /* MSR (banked) and MRS (banked) */
9476 int sysm = extract32(insn, 16, 4) |
9477 (extract32(insn, 8, 1) << 4);
9478 int r = extract32(insn, 22, 1);
9479
9480 if (op1 & 1) {
9481 /* MSR (banked) */
9482 gen_msr_banked(s, r, sysm, rm);
9483 } else {
9484 /* MRS (banked) */
9485 int rd = extract32(insn, 12, 4);
9486
9487 gen_mrs_banked(s, r, sysm, rd);
9488 }
9489 break;
9490 }
9491
9492 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
9493 if (op1 & 1) {
9494 /* PSR = reg */
2fbac54b 9495 tmp = load_reg(s, rm);
9ee6e8bb 9496 i = ((op1 & 2) != 0);
7dcc1f89 9497 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
9498 goto illegal_op;
9499 } else {
9500 /* reg = PSR */
9501 rd = (insn >> 12) & 0xf;
9502 if (op1 & 2) {
9503 if (IS_USER(s))
9504 goto illegal_op;
d9ba4830 9505 tmp = load_cpu_field(spsr);
9ee6e8bb 9506 } else {
7d1b0095 9507 tmp = tcg_temp_new_i32();
9ef39277 9508 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9509 }
d9ba4830 9510 store_reg(s, rd, tmp);
9ee6e8bb
PB
9511 }
9512 break;
9513 case 0x1:
9514 if (op1 == 1) {
9515 /* branch/exchange thumb (bx). */
be5e7a76 9516 ARCH(4T);
d9ba4830
PB
9517 tmp = load_reg(s, rm);
9518 gen_bx(s, tmp);
9ee6e8bb
PB
9519 } else if (op1 == 3) {
9520 /* clz */
be5e7a76 9521 ARCH(5);
9ee6e8bb 9522 rd = (insn >> 12) & 0xf;
1497c961 9523 tmp = load_reg(s, rm);
7539a012 9524 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 9525 store_reg(s, rd, tmp);
9ee6e8bb
PB
9526 } else {
9527 goto illegal_op;
9528 }
9529 break;
9530 case 0x2:
9531 if (op1 == 1) {
9532 ARCH(5J); /* bxj */
9533 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9534 tmp = load_reg(s, rm);
9535 gen_bx(s, tmp);
9ee6e8bb
PB
9536 } else {
9537 goto illegal_op;
9538 }
9539 break;
9540 case 0x3:
9541 if (op1 != 1)
9542 goto illegal_op;
9543
be5e7a76 9544 ARCH(5);
9ee6e8bb 9545 /* branch link/exchange thumb (blx) */
d9ba4830 9546 tmp = load_reg(s, rm);
7d1b0095 9547 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
9548 tcg_gen_movi_i32(tmp2, s->pc);
9549 store_reg(s, 14, tmp2);
9550 gen_bx(s, tmp);
9ee6e8bb 9551 break;
eb0ecd5a
WN
9552 case 0x4:
9553 {
9554 /* crc32/crc32c */
9555 uint32_t c = extract32(insn, 8, 4);
9556
9557 /* Check this CPU supports ARMv8 CRC instructions.
9558 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
9559 * Bits 8, 10 and 11 should be zero.
9560 */
962fcbf2 9561 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
eb0ecd5a
WN
9562 goto illegal_op;
9563 }
9564
9565 rn = extract32(insn, 16, 4);
9566 rd = extract32(insn, 12, 4);
9567
9568 tmp = load_reg(s, rn);
9569 tmp2 = load_reg(s, rm);
aa633469
PM
9570 if (op1 == 0) {
9571 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9572 } else if (op1 == 1) {
9573 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9574 }
eb0ecd5a
WN
9575 tmp3 = tcg_const_i32(1 << op1);
9576 if (c & 0x2) {
9577 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9578 } else {
9579 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9580 }
9581 tcg_temp_free_i32(tmp2);
9582 tcg_temp_free_i32(tmp3);
9583 store_reg(s, rd, tmp);
9584 break;
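            /* Editor's note: op1 selects the operand width handed to the
             * helper (1 << op1 bytes: 0 = byte, 1 = halfword, 2 = word),
             * and insn bit 9 (c & 0x2) picks the CRC32C polynomial over
             * plain CRC32.
             */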
9585 }
9ee6e8bb 9586 case 0x5: /* saturating add/subtract */
be5e7a76 9587 ARCH(5TE);
9ee6e8bb
PB
9588 rd = (insn >> 12) & 0xf;
9589 rn = (insn >> 16) & 0xf;
b40d0353 9590 tmp = load_reg(s, rm);
5e3f878a 9591 tmp2 = load_reg(s, rn);
9ee6e8bb 9592 if (op1 & 2)
9ef39277 9593 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 9594 if (op1 & 1)
9ef39277 9595 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9596 else
9ef39277 9597 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9598 tcg_temp_free_i32(tmp2);
5e3f878a 9599 store_reg(s, rd, tmp);
9ee6e8bb 9600 break;
55c544ed
PM
9601 case 0x6: /* ERET */
9602 if (op1 != 3) {
9603 goto illegal_op;
9604 }
9605 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
9606 goto illegal_op;
9607 }
9608 if ((insn & 0x000fff0f) != 0x0000000e) {
9609 /* UNPREDICTABLE; we choose to UNDEF */
9610 goto illegal_op;
9611 }
9612
9613 if (s->current_el == 2) {
9614 tmp = load_cpu_field(elr_el[2]);
9615 } else {
9616 tmp = load_reg(s, 14);
9617 }
9618 gen_exception_return(s, tmp);
9619 break;
49e14940 9620 case 7:
d4a2dc67
PM
9621 {
9622 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 9623 switch (op1) {
19a6e31c
PM
9624 case 0:
9625 /* HLT */
9626 gen_hlt(s, imm16);
9627 break;
37e6456e
PM
9628 case 1:
9629 /* bkpt */
9630 ARCH(5);
c900a2e6 9631 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
37e6456e
PM
9632 break;
9633 case 2:
9634 /* Hypervisor call (v7) */
9635 ARCH(7);
9636 if (IS_USER(s)) {
9637 goto illegal_op;
9638 }
9639 gen_hvc(s, imm16);
9640 break;
9641 case 3:
9642 /* Secure monitor call (v6+) */
9643 ARCH(6K);
9644 if (IS_USER(s)) {
9645 goto illegal_op;
9646 }
9647 gen_smc(s);
9648 break;
9649 default:
19a6e31c 9650 g_assert_not_reached();
49e14940 9651 }
9ee6e8bb 9652 break;
d4a2dc67 9653 }
9ee6e8bb
PB
9654 case 0x8: /* signed multiply */
9655 case 0xa:
9656 case 0xc:
9657 case 0xe:
be5e7a76 9658 ARCH(5TE);
9ee6e8bb
PB
9659 rs = (insn >> 8) & 0xf;
9660 rn = (insn >> 12) & 0xf;
9661 rd = (insn >> 16) & 0xf;
9662 if (op1 == 1) {
9663 /* (32 * 16) >> 16 */
5e3f878a
PB
9664 tmp = load_reg(s, rm);
9665 tmp2 = load_reg(s, rs);
9ee6e8bb 9666 if (sh & 4)
5e3f878a 9667 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9668 else
5e3f878a 9669 gen_sxth(tmp2);
a7812ae4
PB
9670 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9671 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9672 tmp = tcg_temp_new_i32();
ecc7b3aa 9673 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 9674 tcg_temp_free_i64(tmp64);
9ee6e8bb 9675 if ((sh & 2) == 0) {
5e3f878a 9676 tmp2 = load_reg(s, rn);
9ef39277 9677 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9678 tcg_temp_free_i32(tmp2);
9ee6e8bb 9679 }
5e3f878a 9680 store_reg(s, rd, tmp);
9ee6e8bb
PB
9681 } else {
9682 /* 16 * 16 */
5e3f878a
PB
9683 tmp = load_reg(s, rm);
9684 tmp2 = load_reg(s, rs);
9685 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 9686 tcg_temp_free_i32(tmp2);
9ee6e8bb 9687 if (op1 == 2) {
a7812ae4
PB
9688 tmp64 = tcg_temp_new_i64();
9689 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9690 tcg_temp_free_i32(tmp);
a7812ae4
PB
9691 gen_addq(s, tmp64, rn, rd);
9692 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 9693 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9694 } else {
9695 if (op1 == 0) {
5e3f878a 9696 tmp2 = load_reg(s, rn);
9ef39277 9697 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9698 tcg_temp_free_i32(tmp2);
9ee6e8bb 9699 }
5e3f878a 9700 store_reg(s, rd, tmp);
9ee6e8bb
PB
9701 }
9702 }
9703 break;
9704 default:
9705 goto illegal_op;
9706 }
9707 } else if (((insn & 0x0e000000) == 0 &&
9708 (insn & 0x00000090) != 0x90) ||
9709 ((insn & 0x0e000000) == (1 << 25))) {
9710 int set_cc, logic_cc, shiftop;
9711
9712 op1 = (insn >> 21) & 0xf;
9713 set_cc = (insn >> 20) & 1;
9714 logic_cc = table_logic_cc[op1] & set_cc;
9715
9716 /* data processing instruction */
9717 if (insn & (1 << 25)) {
9718 /* immediate operand */
9719 val = insn & 0xff;
9720 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 9721 if (shift) {
9ee6e8bb 9722 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 9723 }
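            /* Editor's note: this is a rotate-right of the 8-bit
             * immediate by 2 * rotate_imm, the standard A32 modified
             * immediate.  E.g. imm12 = 0x4FF (rotate field 4, value
             * 0xFF) yields ror(0xFF, 8) = 0xFF000000.
             */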
7d1b0095 9724 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
9725 tcg_gen_movi_i32(tmp2, val);
9726 if (logic_cc && shift) {
9727 gen_set_CF_bit31(tmp2);
9728 }
9ee6e8bb
PB
9729 } else {
9730 /* register */
9731 rm = (insn) & 0xf;
e9bb4aa9 9732 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9733 shiftop = (insn >> 5) & 3;
9734 if (!(insn & (1 << 4))) {
9735 shift = (insn >> 7) & 0x1f;
e9bb4aa9 9736 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
9737 } else {
9738 rs = (insn >> 8) & 0xf;
8984bd2e 9739 tmp = load_reg(s, rs);
e9bb4aa9 9740 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
9741 }
9742 }
9743 if (op1 != 0x0f && op1 != 0x0d) {
9744 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
9745 tmp = load_reg(s, rn);
9746 } else {
f764718d 9747 tmp = NULL;
9ee6e8bb
PB
9748 }
9749 rd = (insn >> 12) & 0xf;
9750 switch(op1) {
9751 case 0x00:
e9bb4aa9
JR
9752 tcg_gen_and_i32(tmp, tmp, tmp2);
9753 if (logic_cc) {
9754 gen_logic_CC(tmp);
9755 }
7dcc1f89 9756 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9757 break;
9758 case 0x01:
e9bb4aa9
JR
9759 tcg_gen_xor_i32(tmp, tmp, tmp2);
9760 if (logic_cc) {
9761 gen_logic_CC(tmp);
9762 }
7dcc1f89 9763 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9764 break;
9765 case 0x02:
9766 if (set_cc && rd == 15) {
9767 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 9768 if (IS_USER(s)) {
9ee6e8bb 9769 goto illegal_op;
e9bb4aa9 9770 }
72485ec4 9771 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 9772 gen_exception_return(s, tmp);
9ee6e8bb 9773 } else {
e9bb4aa9 9774 if (set_cc) {
72485ec4 9775 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9776 } else {
9777 tcg_gen_sub_i32(tmp, tmp, tmp2);
9778 }
7dcc1f89 9779 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9780 }
9781 break;
9782 case 0x03:
e9bb4aa9 9783 if (set_cc) {
72485ec4 9784 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9785 } else {
9786 tcg_gen_sub_i32(tmp, tmp2, tmp);
9787 }
7dcc1f89 9788 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9789 break;
9790 case 0x04:
e9bb4aa9 9791 if (set_cc) {
72485ec4 9792 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9793 } else {
9794 tcg_gen_add_i32(tmp, tmp, tmp2);
9795 }
7dcc1f89 9796 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9797 break;
9798 case 0x05:
e9bb4aa9 9799 if (set_cc) {
49b4c31e 9800 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9801 } else {
9802 gen_add_carry(tmp, tmp, tmp2);
9803 }
7dcc1f89 9804 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9805 break;
9806 case 0x06:
e9bb4aa9 9807 if (set_cc) {
2de68a49 9808 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9809 } else {
9810 gen_sub_carry(tmp, tmp, tmp2);
9811 }
7dcc1f89 9812 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9813 break;
9814 case 0x07:
e9bb4aa9 9815 if (set_cc) {
2de68a49 9816 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9817 } else {
9818 gen_sub_carry(tmp, tmp2, tmp);
9819 }
7dcc1f89 9820 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9821 break;
9822 case 0x08:
9823 if (set_cc) {
e9bb4aa9
JR
9824 tcg_gen_and_i32(tmp, tmp, tmp2);
9825 gen_logic_CC(tmp);
9ee6e8bb 9826 }
7d1b0095 9827 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9828 break;
9829 case 0x09:
9830 if (set_cc) {
e9bb4aa9
JR
9831 tcg_gen_xor_i32(tmp, tmp, tmp2);
9832 gen_logic_CC(tmp);
9ee6e8bb 9833 }
7d1b0095 9834 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9835 break;
9836 case 0x0a:
9837 if (set_cc) {
72485ec4 9838 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 9839 }
7d1b0095 9840 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9841 break;
9842 case 0x0b:
9843 if (set_cc) {
72485ec4 9844 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9845 }
7d1b0095 9846 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9847 break;
9848 case 0x0c:
e9bb4aa9
JR
9849 tcg_gen_or_i32(tmp, tmp, tmp2);
9850 if (logic_cc) {
9851 gen_logic_CC(tmp);
9852 }
7dcc1f89 9853 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9854 break;
9855 case 0x0d:
9856 if (logic_cc && rd == 15) {
9857 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 9858 if (IS_USER(s)) {
9ee6e8bb 9859 goto illegal_op;
e9bb4aa9
JR
9860 }
9861 gen_exception_return(s, tmp2);
9ee6e8bb 9862 } else {
e9bb4aa9
JR
9863 if (logic_cc) {
9864 gen_logic_CC(tmp2);
9865 }
7dcc1f89 9866 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9867 }
9868 break;
9869 case 0x0e:
f669df27 9870 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
9871 if (logic_cc) {
9872 gen_logic_CC(tmp);
9873 }
7dcc1f89 9874 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9875 break;
9876 default:
9877 case 0x0f:
e9bb4aa9
JR
9878 tcg_gen_not_i32(tmp2, tmp2);
9879 if (logic_cc) {
9880 gen_logic_CC(tmp2);
9881 }
7dcc1f89 9882 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9883 break;
9884 }
e9bb4aa9 9885 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 9886 tcg_temp_free_i32(tmp2);
e9bb4aa9 9887 }
9ee6e8bb
PB
9888 } else {
9889 /* other instructions */
9890 op1 = (insn >> 24) & 0xf;
9891 switch(op1) {
9892 case 0x0:
9893 case 0x1:
9894 /* multiplies, extra load/stores */
9895 sh = (insn >> 5) & 3;
9896 if (sh == 0) {
9897 if (op1 == 0x0) {
9898 rd = (insn >> 16) & 0xf;
9899 rn = (insn >> 12) & 0xf;
9900 rs = (insn >> 8) & 0xf;
9901 rm = (insn) & 0xf;
9902 op1 = (insn >> 20) & 0xf;
9903 switch (op1) {
9904 case 0: case 1: case 2: case 3: case 6:
9905 /* 32 bit mul */
5e3f878a
PB
9906 tmp = load_reg(s, rs);
9907 tmp2 = load_reg(s, rm);
9908 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9909 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9910 if (insn & (1 << 22)) {
9911 /* Subtract (mls) */
9912 ARCH(6T2);
5e3f878a
PB
9913 tmp2 = load_reg(s, rn);
9914 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 9915 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9916 } else if (insn & (1 << 21)) {
9917 /* Add */
5e3f878a
PB
9918 tmp2 = load_reg(s, rn);
9919 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9920 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9921 }
9922 if (insn & (1 << 20))
5e3f878a
PB
9923 gen_logic_CC(tmp);
9924 store_reg(s, rd, tmp);
9ee6e8bb 9925 break;
8aac08b1
AJ
9926 case 4:
9927 /* 64 bit mul double accumulate (UMAAL) */
9928 ARCH(6);
9929 tmp = load_reg(s, rs);
9930 tmp2 = load_reg(s, rm);
9931 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9932 gen_addq_lo(s, tmp64, rn);
9933 gen_addq_lo(s, tmp64, rd);
9934 gen_storeq_reg(s, rn, rd, tmp64);
9935 tcg_temp_free_i64(tmp64);
9936 break;
9937 case 8: case 9: case 10: case 11:
9938 case 12: case 13: case 14: case 15:
9939 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
9940 tmp = load_reg(s, rs);
9941 tmp2 = load_reg(s, rm);
8aac08b1 9942 if (insn & (1 << 22)) {
c9f10124 9943 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 9944 } else {
c9f10124 9945 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
9946 }
9947 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
9948 TCGv_i32 al = load_reg(s, rn);
9949 TCGv_i32 ah = load_reg(s, rd);
c9f10124 9950 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
9951 tcg_temp_free_i32(al);
9952 tcg_temp_free_i32(ah);
9ee6e8bb 9953 }
8aac08b1 9954 if (insn & (1 << 20)) {
c9f10124 9955 gen_logicq_cc(tmp, tmp2);
8aac08b1 9956 }
c9f10124
RH
9957 store_reg(s, rn, tmp);
9958 store_reg(s, rd, tmp2);
9ee6e8bb 9959 break;
8aac08b1
AJ
9960 default:
9961 goto illegal_op;
9ee6e8bb
PB
9962 }
9963 } else {
9964 rn = (insn >> 16) & 0xf;
9965 rd = (insn >> 12) & 0xf;
9966 if (insn & (1 << 23)) {
9967 /* load/store exclusive */
96c55295
PM
9968 bool is_ld = extract32(insn, 20, 1);
9969 bool is_lasr = !extract32(insn, 8, 1);
2359bf80 9970 int op2 = (insn >> 8) & 3;
86753403 9971 op1 = (insn >> 21) & 0x3;
2359bf80
MR
9972
9973 switch (op2) {
9974 case 0: /* lda/stl */
9975 if (op1 == 1) {
9976 goto illegal_op;
9977 }
9978 ARCH(8);
9979 break;
9980 case 1: /* reserved */
9981 goto illegal_op;
9982 case 2: /* ldaex/stlex */
9983 ARCH(8);
9984 break;
9985 case 3: /* ldrex/strex */
9986 if (op1) {
9987 ARCH(6K);
9988 } else {
9989 ARCH(6);
9990 }
9991 break;
9992 }
9993
3174f8e9 9994 addr = tcg_temp_local_new_i32();
98a46317 9995 load_reg_var(s, addr, rn);
2359bf80 9996
96c55295
PM
9997 if (is_lasr && !is_ld) {
9998 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9999 }
10000
2359bf80 10001 if (op2 == 0) {
96c55295 10002 if (is_ld) {
2359bf80
MR
10003 tmp = tcg_temp_new_i32();
10004 switch (op1) {
10005 case 0: /* lda */
9bb6558a
PM
10006 gen_aa32_ld32u_iss(s, tmp, addr,
10007 get_mem_index(s),
10008 rd | ISSIsAcqRel);
2359bf80
MR
10009 break;
10010 case 2: /* ldab */
9bb6558a
PM
10011 gen_aa32_ld8u_iss(s, tmp, addr,
10012 get_mem_index(s),
10013 rd | ISSIsAcqRel);
2359bf80
MR
10014 break;
10015 case 3: /* ldah */
9bb6558a
PM
10016 gen_aa32_ld16u_iss(s, tmp, addr,
10017 get_mem_index(s),
10018 rd | ISSIsAcqRel);
2359bf80
MR
10019 break;
10020 default:
10021 abort();
10022 }
10023 store_reg(s, rd, tmp);
10024 } else {
10025 rm = insn & 0xf;
10026 tmp = load_reg(s, rm);
10027 switch (op1) {
10028 case 0: /* stl */
9bb6558a
PM
10029 gen_aa32_st32_iss(s, tmp, addr,
10030 get_mem_index(s),
10031 rm | ISSIsAcqRel);
2359bf80
MR
10032 break;
10033 case 2: /* stlb */
9bb6558a
PM
10034 gen_aa32_st8_iss(s, tmp, addr,
10035 get_mem_index(s),
10036 rm | ISSIsAcqRel);
2359bf80
MR
10037 break;
10038 case 3: /* stlh */
9bb6558a
PM
10039 gen_aa32_st16_iss(s, tmp, addr,
10040 get_mem_index(s),
10041 rm | ISSIsAcqRel);
2359bf80
MR
10042 break;
10043 default:
10044 abort();
10045 }
10046 tcg_temp_free_i32(tmp);
10047 }
96c55295 10048 } else if (is_ld) {
86753403
PB
10049 switch (op1) {
10050 case 0: /* ldrex */
426f5abc 10051 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
10052 break;
10053 case 1: /* ldrexd */
426f5abc 10054 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
10055 break;
10056 case 2: /* ldrexb */
426f5abc 10057 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
10058 break;
10059 case 3: /* ldrexh */
426f5abc 10060 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
10061 break;
10062 default:
10063 abort();
10064 }
9ee6e8bb
PB
10065 } else {
10066 rm = insn & 0xf;
86753403
PB
10067 switch (op1) {
10068 case 0: /* strex */
426f5abc 10069 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
10070 break;
10071 case 1: /* strexd */
502e64fe 10072 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
10073 break;
10074 case 2: /* strexb */
426f5abc 10075 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
10076 break;
10077 case 3: /* strexh */
426f5abc 10078 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
10079 break;
10080 default:
10081 abort();
10082 }
9ee6e8bb 10083 }
39d5492a 10084 tcg_temp_free_i32(addr);
96c55295
PM
10085
10086 if (is_lasr && is_ld) {
10087 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
10088 }
c4869ca6
OS
10089 } else if ((insn & 0x00300f00) == 0) {
10090 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
10091 * - SWP, SWPB
10092 */
10093
cf12bce0
EC
10094 TCGv taddr;
10095 TCGMemOp opc = s->be_data;
10096
9ee6e8bb
PB
10097 rm = (insn) & 0xf;
10098
9ee6e8bb 10099 if (insn & (1 << 22)) {
cf12bce0 10100 opc |= MO_UB;
9ee6e8bb 10101 } else {
cf12bce0 10102 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 10103 }
cf12bce0
EC
10104
10105 addr = load_reg(s, rn);
10106 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 10107 tcg_temp_free_i32(addr);
cf12bce0
EC
10108
10109 tmp = load_reg(s, rm);
10110 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
10111 get_mem_index(s), opc);
10112 tcg_temp_free(taddr);
10113 store_reg(s, rd, tmp);
c4869ca6
OS
10114 } else {
10115 goto illegal_op;
9ee6e8bb
PB
10116 }
10117 }
10118 } else {
10119 int address_offset;
3960c336 10120 bool load = insn & (1 << 20);
63f26fcf
PM
10121 bool wbit = insn & (1 << 21);
10122 bool pbit = insn & (1 << 24);
3960c336 10123 bool doubleword = false;
9bb6558a
PM
10124 ISSInfo issinfo;
10125
9ee6e8bb
PB
10126 /* Misc load/store */
10127 rn = (insn >> 16) & 0xf;
10128 rd = (insn >> 12) & 0xf;
3960c336 10129
9bb6558a
PM
10130 /* ISS not valid if writeback */
10131 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
10132
3960c336
PM
10133 if (!load && (sh & 2)) {
10134 /* doubleword */
10135 ARCH(5TE);
10136 if (rd & 1) {
10137 /* UNPREDICTABLE; we choose to UNDEF */
10138 goto illegal_op;
10139 }
10140 load = (sh & 1) == 0;
10141 doubleword = true;
10142 }
10143
b0109805 10144 addr = load_reg(s, rn);
63f26fcf 10145 if (pbit) {
b0109805 10146 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 10147 }
9ee6e8bb 10148 address_offset = 0;
3960c336
PM
10149
10150 if (doubleword) {
10151 if (!load) {
9ee6e8bb 10152 /* store */
b0109805 10153 tmp = load_reg(s, rd);
12dcc321 10154 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10155 tcg_temp_free_i32(tmp);
b0109805
PB
10156 tcg_gen_addi_i32(addr, addr, 4);
10157 tmp = load_reg(s, rd + 1);
12dcc321 10158 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10159 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10160 } else {
10161 /* load */
5a839c0d 10162 tmp = tcg_temp_new_i32();
12dcc321 10163 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
10164 store_reg(s, rd, tmp);
10165 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 10166 tmp = tcg_temp_new_i32();
12dcc321 10167 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10168 rd++;
9ee6e8bb
PB
10169 }
10170 address_offset = -4;
3960c336
PM
10171 } else if (load) {
10172 /* load */
10173 tmp = tcg_temp_new_i32();
10174 switch (sh) {
10175 case 1:
9bb6558a
PM
10176 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10177 issinfo);
3960c336
PM
10178 break;
10179 case 2:
9bb6558a
PM
10180 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
10181 issinfo);
3960c336
PM
10182 break;
10183 default:
10184 case 3:
9bb6558a
PM
10185 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
10186 issinfo);
3960c336
PM
10187 break;
10188 }
9ee6e8bb
PB
10189 } else {
10190 /* store */
b0109805 10191 tmp = load_reg(s, rd);
9bb6558a 10192 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 10193 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10194 }
10195 /* Perform base writeback before the loaded value to
10196 ensure correct behavior with overlapping index registers.
b6af0975 10197 ldrd with base writeback is undefined if the
9ee6e8bb 10198 destination and index registers overlap. */
63f26fcf 10199 if (!pbit) {
b0109805
PB
10200 gen_add_datah_offset(s, insn, address_offset, addr);
10201 store_reg(s, rn, addr);
63f26fcf 10202 } else if (wbit) {
9ee6e8bb 10203 if (address_offset)
b0109805
PB
10204 tcg_gen_addi_i32(addr, addr, address_offset);
10205 store_reg(s, rn, addr);
10206 } else {
7d1b0095 10207 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10208 }
10209 if (load) {
10210 /* Complete the load. */
b0109805 10211 store_reg(s, rd, tmp);
9ee6e8bb
PB
10212 }
10213 }
10214 break;
10215 case 0x4:
10216 case 0x5:
10217 goto do_ldst;
10218 case 0x6:
10219 case 0x7:
10220 if (insn & (1 << 4)) {
10221 ARCH(6);
10222 /* Armv6 Media instructions. */
10223 rm = insn & 0xf;
10224 rn = (insn >> 16) & 0xf;
2c0262af 10225 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
10226 rs = (insn >> 8) & 0xf;
10227 switch ((insn >> 23) & 3) {
10228 case 0: /* Parallel add/subtract. */
10229 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
10230 tmp = load_reg(s, rn);
10231 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10232 sh = (insn >> 5) & 7;
10233 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
10234 goto illegal_op;
6ddbc6e4 10235 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 10236 tcg_temp_free_i32(tmp2);
6ddbc6e4 10237 store_reg(s, rd, tmp);
9ee6e8bb
PB
10238 break;
10239 case 1:
10240 if ((insn & 0x00700020) == 0) {
6c95676b 10241 /* Halfword pack. */
3670669c
PB
10242 tmp = load_reg(s, rn);
10243 tmp2 = load_reg(s, rm);
9ee6e8bb 10244 shift = (insn >> 7) & 0x1f;
3670669c
PB
10245 if (insn & (1 << 6)) {
10246 /* pkhtb */
22478e79
AZ
10247 if (shift == 0)
10248 shift = 31;
10249 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 10250 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 10251 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
10252 } else {
10253 /* pkhbt */
22478e79
AZ
10254 if (shift)
10255 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 10256 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
10257 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10258 }
10259 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10260 tcg_temp_free_i32(tmp2);
3670669c 10261 store_reg(s, rd, tmp);
9ee6e8bb
PB
10262 } else if ((insn & 0x00200020) == 0x00200000) {
10263 /* [us]sat */
6ddbc6e4 10264 tmp = load_reg(s, rm);
9ee6e8bb
PB
10265 shift = (insn >> 7) & 0x1f;
10266 if (insn & (1 << 6)) {
10267 if (shift == 0)
10268 shift = 31;
6ddbc6e4 10269 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10270 } else {
6ddbc6e4 10271 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
10272 }
10273 sh = (insn >> 16) & 0x1f;
40d3c433
CL
10274 tmp2 = tcg_const_i32(sh);
10275 if (insn & (1 << 22))
9ef39277 10276 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 10277 else
9ef39277 10278 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 10279 tcg_temp_free_i32(tmp2);
6ddbc6e4 10280 store_reg(s, rd, tmp);
9ee6e8bb
PB
10281 } else if ((insn & 0x00300fe0) == 0x00200f20) {
10282 /* [us]sat16 */
6ddbc6e4 10283 tmp = load_reg(s, rm);
9ee6e8bb 10284 sh = (insn >> 16) & 0x1f;
40d3c433
CL
10285 tmp2 = tcg_const_i32(sh);
10286 if (insn & (1 << 22))
9ef39277 10287 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 10288 else
9ef39277 10289 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 10290 tcg_temp_free_i32(tmp2);
6ddbc6e4 10291 store_reg(s, rd, tmp);
9ee6e8bb
PB
10292 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
10293 /* Select bytes. */
6ddbc6e4
PB
10294 tmp = load_reg(s, rn);
10295 tmp2 = load_reg(s, rm);
7d1b0095 10296 tmp3 = tcg_temp_new_i32();
0ecb72a5 10297 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 10298 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10299 tcg_temp_free_i32(tmp3);
10300 tcg_temp_free_i32(tmp2);
6ddbc6e4 10301 store_reg(s, rd, tmp);
9ee6e8bb 10302 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 10303 tmp = load_reg(s, rm);
9ee6e8bb 10304 shift = (insn >> 10) & 3;
1301f322 10305 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10306 rotate, a shift is sufficient. */
10307 if (shift != 0)
f669df27 10308 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10309 op1 = (insn >> 20) & 7;
10310 switch (op1) {
5e3f878a
PB
10311 case 0: gen_sxtb16(tmp); break;
10312 case 2: gen_sxtb(tmp); break;
10313 case 3: gen_sxth(tmp); break;
10314 case 4: gen_uxtb16(tmp); break;
10315 case 6: gen_uxtb(tmp); break;
10316 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
10317 default: goto illegal_op;
10318 }
10319 if (rn != 15) {
5e3f878a 10320 tmp2 = load_reg(s, rn);
9ee6e8bb 10321 if ((op1 & 3) == 0) {
5e3f878a 10322 gen_add16(tmp, tmp2);
9ee6e8bb 10323 } else {
5e3f878a 10324 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10325 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10326 }
10327 }
6c95676b 10328 store_reg(s, rd, tmp);
9ee6e8bb
PB
10329 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
10330 /* rev */
b0109805 10331 tmp = load_reg(s, rm);
9ee6e8bb
PB
10332 if (insn & (1 << 22)) {
10333 if (insn & (1 << 7)) {
b0109805 10334 gen_revsh(tmp);
9ee6e8bb
PB
10335 } else {
10336 ARCH(6T2);
b0109805 10337 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10338 }
10339 } else {
10340 if (insn & (1 << 7))
b0109805 10341 gen_rev16(tmp);
9ee6e8bb 10342 else
66896cb8 10343 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 10344 }
b0109805 10345 store_reg(s, rd, tmp);
9ee6e8bb
PB
10346 } else {
10347 goto illegal_op;
10348 }
10349 break;
10350 case 2: /* Multiplies (Type 3). */
41e9564d
PM
10351 switch ((insn >> 20) & 0x7) {
10352 case 5:
10353 if (((insn >> 6) ^ (insn >> 7)) & 1) {
10354 /* op2 not 00x or 11x : UNDEF */
10355 goto illegal_op;
10356 }
838fa72d
AJ
10357 /* Signed multiply most significant [accumulate].
10358 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
10359 tmp = load_reg(s, rm);
10360 tmp2 = load_reg(s, rs);
a7812ae4 10361 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 10362
955a7dd5 10363 if (rd != 15) {
838fa72d 10364 tmp = load_reg(s, rd);
9ee6e8bb 10365 if (insn & (1 << 6)) {
838fa72d 10366 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 10367 } else {
838fa72d 10368 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
10369 }
10370 }
838fa72d
AJ
10371 if (insn & (1 << 5)) {
10372 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10373 }
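                    /* Editor's note: adding 0x80000000 before taking the
                     * high 32 bits rounds to nearest rather than
                     * truncating; bit 5 thus selects the rounding 'R'
                     * variants (SMMULR/SMMLAR/SMMLSR).
                     */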
10374 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10375 tmp = tcg_temp_new_i32();
ecc7b3aa 10376 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10377 tcg_temp_free_i64(tmp64);
955a7dd5 10378 store_reg(s, rn, tmp);
41e9564d
PM
10379 break;
10380 case 0:
10381 case 4:
10382 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
10383 if (insn & (1 << 7)) {
10384 goto illegal_op;
10385 }
10386 tmp = load_reg(s, rm);
10387 tmp2 = load_reg(s, rs);
9ee6e8bb 10388 if (insn & (1 << 5))
5e3f878a
PB
10389 gen_swap_half(tmp2);
10390 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10391 if (insn & (1 << 22)) {
5e3f878a 10392 /* smlald, smlsld */
33bbd75a
PC
10393 TCGv_i64 tmp64_2;
10394
a7812ae4 10395 tmp64 = tcg_temp_new_i64();
33bbd75a 10396 tmp64_2 = tcg_temp_new_i64();
a7812ae4 10397 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 10398 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 10399 tcg_temp_free_i32(tmp);
33bbd75a
PC
10400 tcg_temp_free_i32(tmp2);
10401 if (insn & (1 << 6)) {
10402 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
10403 } else {
10404 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
10405 }
10406 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
10407 gen_addq(s, tmp64, rd, rn);
10408 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 10409 tcg_temp_free_i64(tmp64);
9ee6e8bb 10410 } else {
5e3f878a 10411 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
10412 if (insn & (1 << 6)) {
10413 /* This subtraction cannot overflow. */
10414 tcg_gen_sub_i32(tmp, tmp, tmp2);
10415 } else {
10416 /* This addition cannot overflow 32 bits;
10417 * however it may overflow considered as a
10418 * signed operation, in which case we must set
10419 * the Q flag.
10420 */
10421 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10422 }
10423 tcg_temp_free_i32(tmp2);
22478e79 10424 if (rd != 15)
9ee6e8bb 10425 {
22478e79 10426 tmp2 = load_reg(s, rd);
9ef39277 10427 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10428 tcg_temp_free_i32(tmp2);
9ee6e8bb 10429 }
22478e79 10430 store_reg(s, rn, tmp);
9ee6e8bb 10431 }
41e9564d 10432 break;
b8b8ea05
PM
10433 case 1:
10434 case 3:
10435 /* SDIV, UDIV */
7e0cf8b4 10436 if (!dc_isar_feature(arm_div, s)) {
b8b8ea05
PM
10437 goto illegal_op;
10438 }
10439 if (((insn >> 5) & 7) || (rd != 15)) {
10440 goto illegal_op;
10441 }
10442 tmp = load_reg(s, rm);
10443 tmp2 = load_reg(s, rs);
10444 if (insn & (1 << 21)) {
10445 gen_helper_udiv(tmp, tmp, tmp2);
10446 } else {
10447 gen_helper_sdiv(tmp, tmp, tmp2);
10448 }
10449 tcg_temp_free_i32(tmp2);
10450 store_reg(s, rn, tmp);
10451 break;
41e9564d
PM
10452 default:
10453 goto illegal_op;
9ee6e8bb
PB
10454 }
10455 break;
10456 case 3:
10457 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
10458 switch (op1) {
10459 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
10460 ARCH(6);
10461 tmp = load_reg(s, rm);
10462 tmp2 = load_reg(s, rs);
10463 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10464 tcg_temp_free_i32(tmp2);
ded9d295
AZ
10465 if (rd != 15) {
10466 tmp2 = load_reg(s, rd);
6ddbc6e4 10467 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10468 tcg_temp_free_i32(tmp2);
9ee6e8bb 10469 }
ded9d295 10470 store_reg(s, rn, tmp);
9ee6e8bb
PB
10471 break;
10472 case 0x20: case 0x24: case 0x28: case 0x2c:
10473 /* Bitfield insert/clear. */
10474 ARCH(6T2);
10475 shift = (insn >> 7) & 0x1f;
10476 i = (insn >> 16) & 0x1f;
45140a57
KB
10477 if (i < shift) {
10478 /* UNPREDICTABLE; we choose to UNDEF */
10479 goto illegal_op;
10480 }
9ee6e8bb
PB
10481 i = i + 1 - shift;
10482 if (rm == 15) {
7d1b0095 10483 tmp = tcg_temp_new_i32();
5e3f878a 10484 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 10485 } else {
5e3f878a 10486 tmp = load_reg(s, rm);
9ee6e8bb
PB
10487 }
10488 if (i != 32) {
5e3f878a 10489 tmp2 = load_reg(s, rd);
d593c48e 10490 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 10491 tcg_temp_free_i32(tmp2);
9ee6e8bb 10492 }
5e3f878a 10493 store_reg(s, rd, tmp);
9ee6e8bb
PB
10494 break;
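                /* Editor's note: for BFI/BFC the insn encodes lsb (the
                 * shift) and msb; after "i = i + 1 - shift", i is the
                 * field width, so the deposit above writes Rm's low i
                 * bits into bits [msb:lsb] of Rd (Rm == 15 gives the
                 * zero source, i.e. BFC).  E.g. msb = 15, lsb = 8 gives
                 * deposit(..., shift = 8, width = 8).
                 */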
10495 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
10496 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 10497 ARCH(6T2);
5e3f878a 10498 tmp = load_reg(s, rm);
9ee6e8bb
PB
10499 shift = (insn >> 7) & 0x1f;
10500 i = ((insn >> 16) & 0x1f) + 1;
10501 if (shift + i > 32)
10502 goto illegal_op;
10503 if (i < 32) {
10504 if (op1 & 0x20) {
59a71b4c 10505 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 10506 } else {
59a71b4c 10507 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
10508 }
10509 }
5e3f878a 10510 store_reg(s, rd, tmp);
9ee6e8bb
PB
10511 break;
10512 default:
10513 goto illegal_op;
10514 }
10515 break;
10516 }
10517 break;
10518 }
10519 do_ldst:
10520 /* Check for undefined extension instructions
 10521 * per the ARM Bible, i.e.:
10522 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
10523 */
10524 sh = (0xf << 20) | (0xf << 4);
10525 if (op1 == 0x7 && ((insn & sh) == sh))
10526 {
10527 goto illegal_op;
10528 }
10529 /* load/store byte/word */
10530 rn = (insn >> 16) & 0xf;
10531 rd = (insn >> 12) & 0xf;
b0109805 10532 tmp2 = load_reg(s, rn);
a99caa48
PM
10533 if ((insn & 0x01200000) == 0x00200000) {
10534 /* ldrt/strt */
579d21cc 10535 i = get_a32_user_mem_index(s);
a99caa48
PM
10536 } else {
10537 i = get_mem_index(s);
10538 }
9ee6e8bb 10539 if (insn & (1 << 24))
b0109805 10540 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
10541 if (insn & (1 << 20)) {
10542 /* load */
5a839c0d 10543 tmp = tcg_temp_new_i32();
9ee6e8bb 10544 if (insn & (1 << 22)) {
9bb6558a 10545 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 10546 } else {
9bb6558a 10547 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 10548 }
9ee6e8bb
PB
10549 } else {
10550 /* store */
b0109805 10551 tmp = load_reg(s, rd);
5a839c0d 10552 if (insn & (1 << 22)) {
9bb6558a 10553 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 10554 } else {
9bb6558a 10555 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
10556 }
10557 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10558 }
10559 if (!(insn & (1 << 24))) {
b0109805
PB
10560 gen_add_data_offset(s, insn, tmp2);
10561 store_reg(s, rn, tmp2);
10562 } else if (insn & (1 << 21)) {
10563 store_reg(s, rn, tmp2);
10564 } else {
7d1b0095 10565 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10566 }
10567 if (insn & (1 << 20)) {
10568 /* Complete the load. */
7dcc1f89 10569 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
10570 }
10571 break;
10572 case 0x08:
10573 case 0x09:
10574 {
da3e53dd
PM
10575 int j, n, loaded_base;
10576 bool exc_return = false;
10577 bool is_load = extract32(insn, 20, 1);
10578 bool user = false;
39d5492a 10579 TCGv_i32 loaded_var;
9ee6e8bb
PB
10580 /* load/store multiple words */
10581 /* XXX: store correct base if write back */
9ee6e8bb 10582 if (insn & (1 << 22)) {
da3e53dd 10583 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
10584 if (IS_USER(s))
10585 goto illegal_op; /* only usable in supervisor mode */
10586
da3e53dd
PM
10587 if (is_load && extract32(insn, 15, 1)) {
10588 exc_return = true;
10589 } else {
10590 user = true;
10591 }
9ee6e8bb
PB
10592 }
10593 rn = (insn >> 16) & 0xf;
b0109805 10594 addr = load_reg(s, rn);
9ee6e8bb
PB
10595
10596 /* compute total size */
10597 loaded_base = 0;
f764718d 10598 loaded_var = NULL;
9ee6e8bb
PB
10599 n = 0;
10600 for(i=0;i<16;i++) {
10601 if (insn & (1 << i))
10602 n++;
10603 }
10604 /* XXX: test invalid n == 0 case ? */
10605 if (insn & (1 << 23)) {
10606 if (insn & (1 << 24)) {
10607 /* pre increment */
b0109805 10608 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10609 } else {
10610 /* post increment */
10611 }
10612 } else {
10613 if (insn & (1 << 24)) {
10614 /* pre decrement */
b0109805 10615 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
10616 } else {
10617 /* post decrement */
10618 if (n != 1)
b0109805 10619 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
10620 }
10621 }
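            /* Editor's note: the U (bit 23) / P (bit 24) adjustments
             * above give the usual LDM/STM start addresses (derived from
             * the code; n is the number of registers transferred):
             *
             *   P U  mode   first transfer address
             *   0 1   IA        Rn
             *   1 1   IB        Rn + 4
             *   0 0   DA        Rn - 4*n + 4
             *   1 0   DB        Rn - 4*n
             */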
10622 j = 0;
10623 for(i=0;i<16;i++) {
10624 if (insn & (1 << i)) {
da3e53dd 10625 if (is_load) {
9ee6e8bb 10626 /* load */
5a839c0d 10627 tmp = tcg_temp_new_i32();
12dcc321 10628 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 10629 if (user) {
b75263d6 10630 tmp2 = tcg_const_i32(i);
1ce94f81 10631 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 10632 tcg_temp_free_i32(tmp2);
7d1b0095 10633 tcg_temp_free_i32(tmp);
9ee6e8bb 10634 } else if (i == rn) {
b0109805 10635 loaded_var = tmp;
9ee6e8bb 10636 loaded_base = 1;
9d090d17 10637 } else if (i == 15 && exc_return) {
fb0e8e79 10638 store_pc_exc_ret(s, tmp);
9ee6e8bb 10639 } else {
7dcc1f89 10640 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
10641 }
10642 } else {
10643 /* store */
10644 if (i == 15) {
10645 /* special case: r15 = PC + 8 */
10646 val = (long)s->pc + 4;
7d1b0095 10647 tmp = tcg_temp_new_i32();
b0109805 10648 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 10649 } else if (user) {
7d1b0095 10650 tmp = tcg_temp_new_i32();
b75263d6 10651 tmp2 = tcg_const_i32(i);
9ef39277 10652 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 10653 tcg_temp_free_i32(tmp2);
9ee6e8bb 10654 } else {
b0109805 10655 tmp = load_reg(s, i);
9ee6e8bb 10656 }
12dcc321 10657 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10658 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10659 }
10660 j++;
10661 /* no need to add after the last transfer */
10662 if (j != n)
b0109805 10663 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10664 }
10665 }
10666 if (insn & (1 << 21)) {
10667 /* write back */
10668 if (insn & (1 << 23)) {
10669 if (insn & (1 << 24)) {
10670 /* pre increment */
10671 } else {
10672 /* post increment */
b0109805 10673 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10674 }
10675 } else {
10676 if (insn & (1 << 24)) {
10677 /* pre decrement */
10678 if (n != 1)
b0109805 10679 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
10680 } else {
10681 /* post decrement */
b0109805 10682 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
10683 }
10684 }
b0109805
PB
10685 store_reg(s, rn, addr);
10686 } else {
7d1b0095 10687 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10688 }
10689 if (loaded_base) {
b0109805 10690 store_reg(s, rn, loaded_var);
9ee6e8bb 10691 }
da3e53dd 10692 if (exc_return) {
9ee6e8bb 10693 /* Restore CPSR from SPSR. */
d9ba4830 10694 tmp = load_cpu_field(spsr);
e69ad9df
AL
10695 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10696 gen_io_start();
10697 }
235ea1f5 10698 gen_helper_cpsr_write_eret(cpu_env, tmp);
e69ad9df
AL
10699 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10700 gen_io_end();
10701 }
7d1b0095 10702 tcg_temp_free_i32(tmp);
b29fd33d 10703 /* Must exit loop to check un-masked IRQs */
dcba3a8d 10704 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
10705 }
10706 }
10707 break;
10708 case 0xa:
10709 case 0xb:
10710 {
10711 int32_t offset;
10712
10713 /* branch (and link) */
10714 val = (int32_t)s->pc;
10715 if (insn & (1 << 24)) {
7d1b0095 10716 tmp = tcg_temp_new_i32();
5e3f878a
PB
10717 tcg_gen_movi_i32(tmp, val);
10718 store_reg(s, 14, tmp);
9ee6e8bb 10719 }
534df156
PM
10720 offset = sextract32(insn << 2, 0, 26);
10721 val += offset + 4;
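            /* Editor's note: sextract32(insn << 2, 0, 26) shifts imm24
             * into bits [25:2] and sign-extends, i.e. offset =
             * SignExtend(imm24) * 4.  E.g. for insn = 0xEAFFFFFE
             * (B with imm24 = 0xFFFFFE), offset = -8, so val =
             * s->pc - 8 + 4 is the address of this insn: "B ." loops,
             * assuming s->pc was already advanced past the insn.
             */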
9ee6e8bb
PB
10722 gen_jmp(s, val);
10723 }
10724 break;
10725 case 0xc:
10726 case 0xd:
10727 case 0xe:
6a57f3eb
WN
10728 if (((insn >> 8) & 0xe) == 10) {
10729 /* VFP. */
7dcc1f89 10730 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10731 goto illegal_op;
10732 }
7dcc1f89 10733 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 10734 /* Coprocessor. */
9ee6e8bb 10735 goto illegal_op;
6a57f3eb 10736 }
9ee6e8bb
PB
10737 break;
10738 case 0xf:
10739 /* swi */
eaed129d 10740 gen_set_pc_im(s, s->pc);
d4a2dc67 10741 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 10742 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
10743 break;
10744 default:
10745 illegal_op:
73710361
GB
10746 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10747 default_exception_el(s));
9ee6e8bb
PB
10748 break;
10749 }
10750 }
10751}
10752
296e5a0a
PM
10753static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
10754{
10755 /* Return true if this is a 16 bit instruction. We must be precise
10756 * about this (matching the decode). We assume that s->pc still
10757 * points to the first 16 bits of the insn.
10758 */
10759 if ((insn >> 11) < 0x1d) {
10760 /* Definitely a 16-bit instruction */
10761 return true;
10762 }
10763
10764 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10765 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10766 * end up actually treating this as two 16-bit insns, though,
10767 * if it's half of a bl/blx pair that might span a page boundary.
10768 */
14120108
JS
10769 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10770 arm_dc_feature(s, ARM_FEATURE_M)) {
296e5a0a
PM
10771 /* Thumb2 cores (including all M profile ones) always treat
10772 * 32-bit insns as 32-bit.
10773 */
10774 return false;
10775 }
10776
bfe7ad5b 10777 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
296e5a0a
PM
10778 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10779 * is not on the next page; we merge this into a 32-bit
10780 * insn.
10781 */
10782 return false;
10783 }
10784 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10785 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10786 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10787 * -- handle as single 16 bit insn
10788 */
10789 return true;
10790}
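/* Editor's note: concrete examples of the test above.  0xE7FE ("B .")
 * has top five bits 0b11100 (0x1C < 0x1D), so it is a 16-bit insn;
 * 0xF000 (a BL/BLX prefix) has 0b11110 (0x1E), so on a Thumb-2 core it
 * is treated as the first half of a 32-bit insn.
 */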
10791
9ee6e8bb
PB
10792/* Return true if this is a Thumb-2 logical op. */
10793static int
10794thumb2_logic_op(int op)
10795{
10796 return (op < 8);
10797}
10798
10799/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
10800 then set condition code flags based on the result of the operation.
10801 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
10802 to the high bit of T1.
10803 Returns zero if the opcode is valid. */
10804
10805static int
39d5492a
PM
10806gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
10807 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
10808{
10809 int logic_cc;
10810
10811 logic_cc = 0;
10812 switch (op) {
10813 case 0: /* and */
396e467c 10814 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
10815 logic_cc = conds;
10816 break;
10817 case 1: /* bic */
f669df27 10818 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
10819 logic_cc = conds;
10820 break;
10821 case 2: /* orr */
396e467c 10822 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
10823 logic_cc = conds;
10824 break;
10825 case 3: /* orn */
29501f1b 10826 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
10827 logic_cc = conds;
10828 break;
10829 case 4: /* eor */
396e467c 10830 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
10831 logic_cc = conds;
10832 break;
10833 case 8: /* add */
10834 if (conds)
72485ec4 10835 gen_add_CC(t0, t0, t1);
9ee6e8bb 10836 else
396e467c 10837 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
10838 break;
10839 case 10: /* adc */
10840 if (conds)
49b4c31e 10841 gen_adc_CC(t0, t0, t1);
9ee6e8bb 10842 else
396e467c 10843 gen_adc(t0, t1);
9ee6e8bb
PB
10844 break;
10845 case 11: /* sbc */
2de68a49
RH
10846 if (conds) {
10847 gen_sbc_CC(t0, t0, t1);
10848 } else {
396e467c 10849 gen_sub_carry(t0, t0, t1);
2de68a49 10850 }
9ee6e8bb
PB
10851 break;
10852 case 13: /* sub */
10853 if (conds)
72485ec4 10854 gen_sub_CC(t0, t0, t1);
9ee6e8bb 10855 else
396e467c 10856 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
10857 break;
10858 case 14: /* rsb */
10859 if (conds)
72485ec4 10860 gen_sub_CC(t0, t1, t0);
9ee6e8bb 10861 else
396e467c 10862 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
10863 break;
10864 default: /* 5, 6, 7, 9, 12, 15. */
10865 return 1;
10866 }
10867 if (logic_cc) {
396e467c 10868 gen_logic_CC(t0);
9ee6e8bb 10869 if (shifter_out)
396e467c 10870 gen_set_CF_bit31(t1);
9ee6e8bb
PB
10871 }
10872 return 0;
10873}
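/*
 * Illustrative example: for "ands r0, r0, r1, lsl #3" the caller
 * first shifts t1 (gen_arm_shift_im with logic_cc set, which leaves
 * the shifter carry-out in CF) and then calls this function with
 * op == 0, conds != 0 and shifter_out == 0.  shifter_out is only
 * nonzero for the modified-immediate forms, where the carry must
 * come from bit 31 of the expanded constant instead.
 */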
10874
2eea841c
PM
10875/* Translate a 32-bit thumb instruction. */
10876static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 10877{
296e5a0a 10878 uint32_t imm, shift, offset;
9ee6e8bb 10879 uint32_t rd, rn, rm, rs;
39d5492a
PM
10880 TCGv_i32 tmp;
10881 TCGv_i32 tmp2;
10882 TCGv_i32 tmp3;
10883 TCGv_i32 addr;
a7812ae4 10884 TCGv_i64 tmp64;
9ee6e8bb
PB
10885 int op;
10886 int shiftop;
10887 int conds;
10888 int logic_cc;
10889
14120108
JS
10890 /*
10891 * ARMv6-M supports a limited subset of Thumb2 instructions.
10892 * Other Thumb1 architectures allow only 32-bit
10893 * combined BL/BLX prefix and suffix.
296e5a0a 10894 */
14120108
JS
10895 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10896 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10897 int i;
10898 bool found = false;
8297cb13
JS
10899 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10900 0xf3b08040 /* dsb */,
10901 0xf3b08050 /* dmb */,
10902 0xf3b08060 /* isb */,
10903 0xf3e08000 /* mrs */,
10904 0xf000d000 /* bl */};
10905 static const uint32_t armv6m_mask[] = {0xffe0d000,
10906 0xfff0d0f0,
10907 0xfff0d0f0,
10908 0xfff0d0f0,
10909 0xffe0d000,
10910 0xf800d000};
14120108
JS
10911
10912 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10913 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10914 found = true;
10915 break;
10916 }
10917 }
10918 if (!found) {
10919 goto illegal_op;
10920 }
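            /*
             * Illustrative note: the mask/value pairs above whitelist
             * the only 32-bit encodings ARMv6-M defines (MSR, DSB, DMB,
             * ISB, MRS, BL); any other 32-bit pattern UNDEFs here
             * instead of falling through to the full Thumb-2 decoder.
             */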
10921 } else if ((insn & 0xf800e800) != 0xf000e800) {
9ee6e8bb
PB
10922 ARCH(6T2);
10923 }
10924
10925 rn = (insn >> 16) & 0xf;
10926 rs = (insn >> 12) & 0xf;
10927 rd = (insn >> 8) & 0xf;
10928 rm = insn & 0xf;
10929 switch ((insn >> 25) & 0xf) {
10930 case 0: case 1: case 2: case 3:
10931 /* 16-bit instructions. Should never happen. */
10932 abort();
10933 case 4:
10934 if (insn & (1 << 22)) {
ebfe27c5
PM
10935 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10936 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 10937 * table branch, TT.
ebfe27c5 10938 */
76eff04d
PM
10939 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10940 arm_dc_feature(s, ARM_FEATURE_V8)) {
10941 /* 0b1110_1001_0111_1111_1110_1001_0111_1111
10942 * - SG (v8M only)
10943 * The bulk of the behaviour for this instruction is implemented
10944 * in v7m_handle_execute_nsc(), which deals with the insn when
10945 * it is executed by a CPU in non-secure state from memory
10946 * which is Secure & NonSecure-Callable.
10947 * Here we only need to handle the remaining cases:
10948 * * in NS memory (including the "security extension not
10949 * implemented" case) : NOP
10950 * * in S memory but CPU already secure (clear IT bits)
10951 * We know that the attribute for the memory this insn is
10952 * in must match the current CPU state, because otherwise
10953 * get_phys_addr_pmsav8 would have generated an exception.
10954 */
10955 if (s->v8m_secure) {
10956 /* Like the IT insn, we don't need to generate any code */
10957 s->condexec_cond = 0;
10958 s->condexec_mask = 0;
10959 }
10960 } else if (insn & 0x01200000) {
ebfe27c5
PM
10961 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10962 * - load/store dual (post-indexed)
10963 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10964 * - load/store dual (literal and immediate)
10965 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10966 * - load/store dual (pre-indexed)
10967 */
910d7692
PM
10968 bool wback = extract32(insn, 21, 1);
10969
9ee6e8bb 10970 if (rn == 15) {
ebfe27c5
PM
10971 if (insn & (1 << 21)) {
10972 /* UNPREDICTABLE */
10973 goto illegal_op;
10974 }
7d1b0095 10975 addr = tcg_temp_new_i32();
b0109805 10976 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 10977 } else {
b0109805 10978 addr = load_reg(s, rn);
9ee6e8bb
PB
10979 }
10980 offset = (insn & 0xff) * 4;
910d7692 10981 if ((insn & (1 << 23)) == 0) {
9ee6e8bb 10982 offset = -offset;
910d7692
PM
10983 }
10984
10985 if (s->v8m_stackcheck && rn == 13 && wback) {
10986 /*
10987 * Here 'addr' is the current SP; if offset is +ve we're
10988 * moving SP up, else down. It is UNKNOWN whether the limit
10989 * check triggers when SP starts below the limit and ends
10990 * up above it; check whichever of the current and final
10991 * SP is lower, so QEMU will trigger in that situation.
10992 */
10993 if ((int32_t)offset < 0) {
10994 TCGv_i32 newsp = tcg_temp_new_i32();
10995
10996 tcg_gen_addi_i32(newsp, addr, offset);
10997 gen_helper_v8m_stackcheck(cpu_env, newsp);
10998 tcg_temp_free_i32(newsp);
10999 } else {
11000 gen_helper_v8m_stackcheck(cpu_env, addr);
11001 }
11002 }
11003
9ee6e8bb 11004 if (insn & (1 << 24)) {
b0109805 11005 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
11006 offset = 0;
11007 }
11008 if (insn & (1 << 20)) {
11009 /* ldrd */
e2592fad 11010 tmp = tcg_temp_new_i32();
12dcc321 11011 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
11012 store_reg(s, rs, tmp);
11013 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 11014 tmp = tcg_temp_new_i32();
12dcc321 11015 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11016 store_reg(s, rd, tmp);
9ee6e8bb
PB
11017 } else {
11018 /* strd */
b0109805 11019 tmp = load_reg(s, rs);
12dcc321 11020 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 11021 tcg_temp_free_i32(tmp);
b0109805
PB
11022 tcg_gen_addi_i32(addr, addr, 4);
11023 tmp = load_reg(s, rd);
12dcc321 11024 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 11025 tcg_temp_free_i32(tmp);
9ee6e8bb 11026 }
910d7692 11027 if (wback) {
9ee6e8bb 11028 /* Base writeback. */
b0109805
PB
11029 tcg_gen_addi_i32(addr, addr, offset - 4);
11030 store_reg(s, rn, addr);
11031 } else {
7d1b0095 11032 tcg_temp_free_i32(addr);
9ee6e8bb
PB
11033 }
11034 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
11035 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
11036 * - load/store exclusive word
5158de24 11037 * - TT (v8M only)
ebfe27c5
PM
11038 */
11039 if (rs == 15) {
5158de24
PM
11040 if (!(insn & (1 << 20)) &&
11041 arm_dc_feature(s, ARM_FEATURE_M) &&
11042 arm_dc_feature(s, ARM_FEATURE_V8)) {
11043 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
11044 * - TT (v8M only)
11045 */
11046 bool alt = insn & (1 << 7);
11047 TCGv_i32 addr, op, ttresp;
11048
11049 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
11050 /* we UNDEF for these UNPREDICTABLE cases */
11051 goto illegal_op;
11052 }
11053
11054 if (alt && !s->v8m_secure) {
11055 goto illegal_op;
11056 }
11057
11058 addr = load_reg(s, rn);
11059 op = tcg_const_i32(extract32(insn, 6, 2));
11060 ttresp = tcg_temp_new_i32();
11061 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
11062 tcg_temp_free_i32(addr);
11063 tcg_temp_free_i32(op);
11064 store_reg(s, rd, ttresp);
384c6c03 11065 break;
5158de24 11066 }
ebfe27c5
PM
11067 goto illegal_op;
11068 }
39d5492a 11069 addr = tcg_temp_local_new_i32();
98a46317 11070 load_reg_var(s, addr, rn);
426f5abc 11071 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 11072 if (insn & (1 << 20)) {
426f5abc 11073 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 11074 } else {
426f5abc 11075 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 11076 }
39d5492a 11077 tcg_temp_free_i32(addr);
2359bf80 11078 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
11079 /* Table Branch. */
11080 if (rn == 15) {
7d1b0095 11081 addr = tcg_temp_new_i32();
b0109805 11082 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 11083 } else {
b0109805 11084 addr = load_reg(s, rn);
9ee6e8bb 11085 }
b26eefb6 11086 tmp = load_reg(s, rm);
b0109805 11087 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
11088 if (insn & (1 << 4)) {
11089 /* tbh */
b0109805 11090 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11091 tcg_temp_free_i32(tmp);
e2592fad 11092 tmp = tcg_temp_new_i32();
12dcc321 11093 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 11094 } else { /* tbb */
7d1b0095 11095 tcg_temp_free_i32(tmp);
e2592fad 11096 tmp = tcg_temp_new_i32();
12dcc321 11097 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 11098 }
7d1b0095 11099 tcg_temp_free_i32(addr);
b0109805
PB
11100 tcg_gen_shli_i32(tmp, tmp, 1);
11101 tcg_gen_addi_i32(tmp, tmp, s->pc);
11102 store_reg(s, 15, tmp);
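                /*
                 * Worked example (illustrative): for "tbb [r0, r1]"
                 * with r1 == 2 and the byte at r0 + 2 equal to 5, the
                 * code above loads 5, doubles it and adds the PC of the
                 * following insn, branching forward by 10 bytes; TBH
                 * does the same with halfword entries at r0 + 2 * r1.
                 */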
9ee6e8bb 11103 } else {
96c55295
PM
11104 bool is_lasr = false;
11105 bool is_ld = extract32(insn, 20, 1);
2359bf80 11106 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 11107 op = (insn >> 4) & 0x3;
2359bf80
MR
11108 switch (op2) {
11109 case 0:
426f5abc 11110 goto illegal_op;
2359bf80
MR
11111 case 1:
11112 /* Load/store exclusive byte/halfword/doubleword */
11113 if (op == 2) {
11114 goto illegal_op;
11115 }
11116 ARCH(7);
11117 break;
11118 case 2:
11119 /* Load-acquire/store-release */
11120 if (op == 3) {
11121 goto illegal_op;
11122 }
11123 /* Fall through */
11124 case 3:
11125 /* Load-acquire/store-release exclusive */
11126 ARCH(8);
96c55295 11127 is_lasr = true;
2359bf80 11128 break;
426f5abc 11129 }
96c55295
PM
11130
11131 if (is_lasr && !is_ld) {
11132 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
11133 }
11134
39d5492a 11135 addr = tcg_temp_local_new_i32();
98a46317 11136 load_reg_var(s, addr, rn);
2359bf80 11137 if (!(op2 & 1)) {
96c55295 11138 if (is_ld) {
2359bf80
MR
11139 tmp = tcg_temp_new_i32();
11140 switch (op) {
11141 case 0: /* ldab */
9bb6558a
PM
11142 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
11143 rs | ISSIsAcqRel);
2359bf80
MR
11144 break;
11145 case 1: /* ldah */
9bb6558a
PM
11146 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
11147 rs | ISSIsAcqRel);
2359bf80
MR
11148 break;
11149 case 2: /* lda */
9bb6558a
PM
11150 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11151 rs | ISSIsAcqRel);
2359bf80
MR
11152 break;
11153 default:
11154 abort();
11155 }
11156 store_reg(s, rs, tmp);
11157 } else {
11158 tmp = load_reg(s, rs);
11159 switch (op) {
11160 case 0: /* stlb */
9bb6558a
PM
11161 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
11162 rs | ISSIsAcqRel);
2359bf80
MR
11163 break;
11164 case 1: /* stlh */
9bb6558a
PM
11165 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
11166 rs | ISSIsAcqRel);
2359bf80
MR
11167 break;
11168 case 2: /* stl */
9bb6558a
PM
11169 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
11170 rs | ISSIsAcqRel);
2359bf80
MR
11171 break;
11172 default:
11173 abort();
11174 }
11175 tcg_temp_free_i32(tmp);
11176 }
96c55295 11177 } else if (is_ld) {
426f5abc 11178 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 11179 } else {
426f5abc 11180 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 11181 }
39d5492a 11182 tcg_temp_free_i32(addr);
96c55295
PM
11183
11184 if (is_lasr && is_ld) {
11185 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
11186 }
9ee6e8bb
PB
11187 }
11188 } else {
11189 /* Load/store multiple, RFE, SRS. */
11190 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 11191 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 11192 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11193 goto illegal_op;
00115976 11194 }
9ee6e8bb
PB
11195 if (insn & (1 << 20)) {
11196 /* rfe */
b0109805
PB
11197 addr = load_reg(s, rn);
11198 if ((insn & (1 << 24)) == 0)
11199 tcg_gen_addi_i32(addr, addr, -8);
11200 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 11201 tmp = tcg_temp_new_i32();
12dcc321 11202 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11203 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 11204 tmp2 = tcg_temp_new_i32();
12dcc321 11205 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
11206 if (insn & (1 << 21)) {
11207 /* Base writeback. */
b0109805
PB
11208 if (insn & (1 << 24)) {
11209 tcg_gen_addi_i32(addr, addr, 4);
11210 } else {
11211 tcg_gen_addi_i32(addr, addr, -4);
11212 }
11213 store_reg(s, rn, addr);
11214 } else {
7d1b0095 11215 tcg_temp_free_i32(addr);
9ee6e8bb 11216 }
b0109805 11217 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
11218 } else {
11219 /* srs */
81465888
PM
11220 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
11221 insn & (1 << 21));
9ee6e8bb
PB
11222 }
11223 } else {
5856d44e 11224 int i, loaded_base = 0;
39d5492a 11225 TCGv_i32 loaded_var;
7c0ed88e 11226 bool wback = extract32(insn, 21, 1);
9ee6e8bb 11227 /* Load/store multiple. */
b0109805 11228 addr = load_reg(s, rn);
9ee6e8bb
PB
11229 offset = 0;
11230 for (i = 0; i < 16; i++) {
11231 if (insn & (1 << i))
11232 offset += 4;
11233 }
7c0ed88e 11234
9ee6e8bb 11235 if (insn & (1 << 24)) {
b0109805 11236 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
11237 }
11238
7c0ed88e
PM
11239 if (s->v8m_stackcheck && rn == 13 && wback) {
11240 /*
11241 * If the writeback is incrementing SP rather than
11242 * decrementing it, and the initial SP is below the
11243 * stack limit but the final written-back SP would
11244 * be above, then we must not perform any memory
11245 * accesses, but it is IMPDEF whether we generate
11246 * an exception. We choose to do so in this case.
11247 * At this point 'addr' is the lowest address, so
11248 * either the original SP (if incrementing) or our
11249 * final SP (if decrementing), so that's what we check.
11250 */
11251 gen_helper_v8m_stackcheck(cpu_env, addr);
11252 }
11253
f764718d 11254 loaded_var = NULL;
9ee6e8bb
PB
11255 for (i = 0; i < 16; i++) {
11256 if ((insn & (1 << i)) == 0)
11257 continue;
11258 if (insn & (1 << 20)) {
11259 /* Load. */
e2592fad 11260 tmp = tcg_temp_new_i32();
12dcc321 11261 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 11262 if (i == 15) {
3bb8a96f 11263 gen_bx_excret(s, tmp);
5856d44e
YO
11264 } else if (i == rn) {
11265 loaded_var = tmp;
11266 loaded_base = 1;
9ee6e8bb 11267 } else {
b0109805 11268 store_reg(s, i, tmp);
9ee6e8bb
PB
11269 }
11270 } else {
11271 /* Store. */
b0109805 11272 tmp = load_reg(s, i);
12dcc321 11273 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 11274 tcg_temp_free_i32(tmp);
9ee6e8bb 11275 }
b0109805 11276 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 11277 }
5856d44e
YO
11278 if (loaded_base) {
11279 store_reg(s, rn, loaded_var);
11280 }
7c0ed88e 11281 if (wback) {
9ee6e8bb
PB
11282 /* Base register writeback. */
11283 if (insn & (1 << 24)) {
b0109805 11284 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
11285 }
11286 /* Fault if writeback register is in register list. */
11287 if (insn & (1 << rn))
11288 goto illegal_op;
b0109805
PB
11289 store_reg(s, rn, addr);
11290 } else {
7d1b0095 11291 tcg_temp_free_i32(addr);
9ee6e8bb
PB
11292 }
11293 }
11294 }
11295 break;
2af9ab77
JB
11296 case 5:
11297
9ee6e8bb 11298 op = (insn >> 21) & 0xf;
2af9ab77 11299 if (op == 6) {
62b44f05
AR
11300 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11301 goto illegal_op;
11302 }
2af9ab77
JB
11303 /* Halfword pack. */
11304 tmp = load_reg(s, rn);
11305 tmp2 = load_reg(s, rm);
11306 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
11307 if (insn & (1 << 5)) {
11308 /* pkhtb */
11309 if (shift == 0)
11310 shift = 31;
11311 tcg_gen_sari_i32(tmp2, tmp2, shift);
11312 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
11313 tcg_gen_ext16u_i32(tmp2, tmp2);
11314 } else {
11315 /* pkhbt */
11316 if (shift)
11317 tcg_gen_shli_i32(tmp2, tmp2, shift);
11318 tcg_gen_ext16u_i32(tmp, tmp);
11319 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
11320 }
11321 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 11322 tcg_temp_free_i32(tmp2);
3174f8e9
FN
11323 store_reg(s, rd, tmp);
11324 } else {
2af9ab77
JB
11325 /* Data processing register constant shift. */
11326 if (rn == 15) {
7d1b0095 11327 tmp = tcg_temp_new_i32();
2af9ab77
JB
11328 tcg_gen_movi_i32(tmp, 0);
11329 } else {
11330 tmp = load_reg(s, rn);
11331 }
11332 tmp2 = load_reg(s, rm);
11333
11334 shiftop = (insn >> 4) & 3;
11335 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
11336 conds = (insn & (1 << 20)) != 0;
11337 logic_cc = (conds && thumb2_logic_op(op));
11338 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
11339 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
11340 goto illegal_op;
7d1b0095 11341 tcg_temp_free_i32(tmp2);
55203189
PM
11342 if (rd == 13 &&
11343 ((op == 2 && rn == 15) ||
11344 (op == 8 && rn == 13) ||
11345 (op == 13 && rn == 13))) {
11346 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
11347 store_sp_checked(s, tmp);
11348 } else if (rd != 15) {
2af9ab77
JB
11349 store_reg(s, rd, tmp);
11350 } else {
7d1b0095 11351 tcg_temp_free_i32(tmp);
2af9ab77 11352 }
3174f8e9 11353 }
9ee6e8bb
PB
11354 break;
11355 case 13: /* Misc data processing. */
11356 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
11357 if (op < 4 && (insn & 0xf000) != 0xf000)
11358 goto illegal_op;
11359 switch (op) {
11360 case 0: /* Register controlled shift. */
8984bd2e
PB
11361 tmp = load_reg(s, rn);
11362 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
11363 if ((insn & 0x70) != 0)
11364 goto illegal_op;
a2d12f0f
PM
11365 /*
11366 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
11367 * - MOV, MOVS (register-shifted register), flagsetting
11368 */
9ee6e8bb 11369 op = (insn >> 21) & 3;
8984bd2e
PB
11370 logic_cc = (insn & (1 << 20)) != 0;
11371 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
11372 if (logic_cc)
11373 gen_logic_CC(tmp);
bedb8a6b 11374 store_reg(s, rd, tmp);
9ee6e8bb
PB
11375 break;
11376 case 1: /* Sign/zero extend. */
62b44f05
AR
11377 op = (insn >> 20) & 7;
11378 switch (op) {
11379 case 0: /* SXTAH, SXTH */
11380 case 1: /* UXTAH, UXTH */
11381 case 4: /* SXTAB, SXTB */
11382 case 5: /* UXTAB, UXTB */
11383 break;
11384 case 2: /* SXTAB16, SXTB16 */
11385 case 3: /* UXTAB16, UXTB16 */
11386 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11387 goto illegal_op;
11388 }
11389 break;
11390 default:
11391 goto illegal_op;
11392 }
11393 if (rn != 15) {
11394 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11395 goto illegal_op;
11396 }
11397 }
5e3f878a 11398 tmp = load_reg(s, rm);
9ee6e8bb 11399 shift = (insn >> 4) & 3;
1301f322 11400 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
11401 rotate, a shift is sufficient. */
11402 if (shift != 0)
f669df27 11403 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
11404 op = (insn >> 20) & 7;
11405 switch (op) {
5e3f878a
PB
11406 case 0: gen_sxth(tmp); break;
11407 case 1: gen_uxth(tmp); break;
11408 case 2: gen_sxtb16(tmp); break;
11409 case 3: gen_uxtb16(tmp); break;
11410 case 4: gen_sxtb(tmp); break;
11411 case 5: gen_uxtb(tmp); break;
62b44f05
AR
11412 default:
11413 g_assert_not_reached();
9ee6e8bb
PB
11414 }
11415 if (rn != 15) {
5e3f878a 11416 tmp2 = load_reg(s, rn);
9ee6e8bb 11417 if ((op >> 1) == 1) {
5e3f878a 11418 gen_add16(tmp, tmp2);
9ee6e8bb 11419 } else {
5e3f878a 11420 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11421 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11422 }
11423 }
5e3f878a 11424 store_reg(s, rd, tmp);
9ee6e8bb
PB
11425 break;
11426 case 2: /* SIMD add/subtract. */
62b44f05
AR
11427 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11428 goto illegal_op;
11429 }
9ee6e8bb
PB
11430 op = (insn >> 20) & 7;
11431 shift = (insn >> 4) & 7;
11432 if ((op & 3) == 3 || (shift & 3) == 3)
11433 goto illegal_op;
6ddbc6e4
PB
11434 tmp = load_reg(s, rn);
11435 tmp2 = load_reg(s, rm);
11436 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 11437 tcg_temp_free_i32(tmp2);
6ddbc6e4 11438 store_reg(s, rd, tmp);
9ee6e8bb
PB
11439 break;
11440 case 3: /* Other data processing. */
11441 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
11442 if (op < 4) {
11443 /* Saturating add/subtract. */
62b44f05
AR
11444 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11445 goto illegal_op;
11446 }
d9ba4830
PB
11447 tmp = load_reg(s, rn);
11448 tmp2 = load_reg(s, rm);
9ee6e8bb 11449 if (op & 1)
9ef39277 11450 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 11451 if (op & 2)
9ef39277 11452 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 11453 else
9ef39277 11454 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 11455 tcg_temp_free_i32(tmp2);
9ee6e8bb 11456 } else {
62b44f05
AR
11457 switch (op) {
11458 case 0x0a: /* rbit */
11459 case 0x08: /* rev */
11460 case 0x09: /* rev16 */
11461 case 0x0b: /* revsh */
11462 case 0x18: /* clz */
11463 break;
11464 case 0x10: /* sel */
11465 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11466 goto illegal_op;
11467 }
11468 break;
11469 case 0x20: /* crc32/crc32c */
11470 case 0x21:
11471 case 0x22:
11472 case 0x28:
11473 case 0x29:
11474 case 0x2a:
962fcbf2 11475 if (!dc_isar_feature(aa32_crc32, s)) {
62b44f05
AR
11476 goto illegal_op;
11477 }
11478 break;
11479 default:
11480 goto illegal_op;
11481 }
d9ba4830 11482 tmp = load_reg(s, rn);
9ee6e8bb
PB
11483 switch (op) {
11484 case 0x0a: /* rbit */
d9ba4830 11485 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
11486 break;
11487 case 0x08: /* rev */
66896cb8 11488 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
11489 break;
11490 case 0x09: /* rev16 */
d9ba4830 11491 gen_rev16(tmp);
9ee6e8bb
PB
11492 break;
11493 case 0x0b: /* revsh */
d9ba4830 11494 gen_revsh(tmp);
9ee6e8bb
PB
11495 break;
11496 case 0x10: /* sel */
d9ba4830 11497 tmp2 = load_reg(s, rm);
7d1b0095 11498 tmp3 = tcg_temp_new_i32();
0ecb72a5 11499 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 11500 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
11501 tcg_temp_free_i32(tmp3);
11502 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11503 break;
11504 case 0x18: /* clz */
7539a012 11505 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 11506 break;
eb0ecd5a
WN
11507 case 0x20:
11508 case 0x21:
11509 case 0x22:
11510 case 0x28:
11511 case 0x29:
11512 case 0x2a:
11513 {
11514 /* crc32/crc32c */
11515 uint32_t sz = op & 0x3;
11516 uint32_t c = op & 0x8;
11517
eb0ecd5a 11518 tmp2 = load_reg(s, rm);
aa633469
PM
11519 if (sz == 0) {
11520 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
11521 } else if (sz == 1) {
11522 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
11523 }
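                    /*
                     * Illustrative note: sz encodes the operand width
                     * (0 = byte, 1 = halfword, 2 = word), so the
                     * 1 << sz passed to the helper below is the number
                     * of bytes fed into the CRC; the masking above
                     * discards the unused high bits of the value
                     * register.
                     */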
eb0ecd5a
WN
11524 tmp3 = tcg_const_i32(1 << sz);
11525 if (c) {
11526 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
11527 } else {
11528 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
11529 }
11530 tcg_temp_free_i32(tmp2);
11531 tcg_temp_free_i32(tmp3);
11532 break;
11533 }
9ee6e8bb 11534 default:
62b44f05 11535 g_assert_not_reached();
9ee6e8bb
PB
11536 }
11537 }
d9ba4830 11538 store_reg(s, rd, tmp);
9ee6e8bb
PB
11539 break;
11540 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
11541 switch ((insn >> 20) & 7) {
11542 case 0: /* 32 x 32 -> 32 */
11543 case 7: /* Unsigned sum of absolute differences. */
11544 break;
11545 case 1: /* 16 x 16 -> 32 */
11546 case 2: /* Dual multiply add. */
11547 case 3: /* 32 * 16 -> 32msb */
11548 case 4: /* Dual multiply subtract. */
11549 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11550 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11551 goto illegal_op;
11552 }
11553 break;
11554 }
9ee6e8bb 11555 op = (insn >> 4) & 0xf;
d9ba4830
PB
11556 tmp = load_reg(s, rn);
11557 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
11558 switch ((insn >> 20) & 7) {
11559 case 0: /* 32 x 32 -> 32 */
d9ba4830 11560 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 11561 tcg_temp_free_i32(tmp2);
9ee6e8bb 11562 if (rs != 15) {
d9ba4830 11563 tmp2 = load_reg(s, rs);
9ee6e8bb 11564 if (op)
d9ba4830 11565 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 11566 else
d9ba4830 11567 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11568 tcg_temp_free_i32(tmp2);
9ee6e8bb 11569 }
9ee6e8bb
PB
11570 break;
11571 case 1: /* 16 x 16 -> 32 */
d9ba4830 11572 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 11573 tcg_temp_free_i32(tmp2);
9ee6e8bb 11574 if (rs != 15) {
d9ba4830 11575 tmp2 = load_reg(s, rs);
9ef39277 11576 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 11577 tcg_temp_free_i32(tmp2);
9ee6e8bb 11578 }
9ee6e8bb
PB
11579 break;
11580 case 2: /* Dual multiply add. */
11581 case 4: /* Dual multiply subtract. */
11582 if (op)
d9ba4830
PB
11583 gen_swap_half(tmp2);
11584 gen_smul_dual(tmp, tmp2);
9ee6e8bb 11585 if (insn & (1 << 22)) {
e1d177b9 11586 /* This subtraction cannot overflow. */
d9ba4830 11587 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11588 } else {
e1d177b9
PM
11589 /* This addition cannot overflow 32 bits;
11590 * however it may overflow considered as a signed
11591 * operation, in which case we must set the Q flag.
11592 */
9ef39277 11593 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 11594 }
7d1b0095 11595 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11596 if (rs != 15)
11597 {
d9ba4830 11598 tmp2 = load_reg(s, rs);
9ef39277 11599 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 11600 tcg_temp_free_i32(tmp2);
9ee6e8bb 11601 }
9ee6e8bb
PB
11602 break;
11603 case 3: /* 32 * 16 -> 32msb */
11604 if (op)
d9ba4830 11605 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 11606 else
d9ba4830 11607 gen_sxth(tmp2);
a7812ae4
PB
11608 tmp64 = gen_muls_i64_i32(tmp, tmp2);
11609 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 11610 tmp = tcg_temp_new_i32();
ecc7b3aa 11611 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 11612 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
11613 if (rs != 15)
11614 {
d9ba4830 11615 tmp2 = load_reg(s, rs);
9ef39277 11616 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 11617 tcg_temp_free_i32(tmp2);
9ee6e8bb 11618 }
9ee6e8bb 11619 break;
838fa72d
AJ
11620 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11621 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 11622 if (rs != 15) {
838fa72d
AJ
11623 tmp = load_reg(s, rs);
11624 if (insn & (1 << 20)) {
11625 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 11626 } else {
838fa72d 11627 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 11628 }
2c0262af 11629 }
838fa72d
AJ
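            /*
             * Illustrative note: adding 0x80000000 before the low word
             * is discarded below rounds the high half to nearest, which
             * is what distinguishes SMMULR/SMMLAR/SMMLSR (bit 4 set)
             * from the truncating SMMUL/SMMLA/SMMLS.
             */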
11630 if (insn & (1 << 4)) {
11631 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
11632 }
11633 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 11634 tmp = tcg_temp_new_i32();
ecc7b3aa 11635 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 11636 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
11637 break;
11638 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 11639 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 11640 tcg_temp_free_i32(tmp2);
9ee6e8bb 11641 if (rs != 15) {
d9ba4830
PB
11642 tmp2 = load_reg(s, rs);
11643 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11644 tcg_temp_free_i32(tmp2);
5fd46862 11645 }
9ee6e8bb 11646 break;
2c0262af 11647 }
d9ba4830 11648 store_reg(s, rd, tmp);
2c0262af 11649 break;
9ee6e8bb
PB
11650 case 6: case 7: /* 64-bit multiply, Divide. */
11651 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
11652 tmp = load_reg(s, rn);
11653 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
11654 if ((op & 0x50) == 0x10) {
11655 /* sdiv, udiv */
7e0cf8b4 11656 if (!dc_isar_feature(thumb_div, s)) {
9ee6e8bb 11657 goto illegal_op;
47789990 11658 }
9ee6e8bb 11659 if (op & 0x20)
5e3f878a 11660 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 11661 else
5e3f878a 11662 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 11663 tcg_temp_free_i32(tmp2);
5e3f878a 11664 store_reg(s, rd, tmp);
9ee6e8bb
PB
11665 } else if ((op & 0xe) == 0xc) {
11666 /* Dual multiply accumulate long. */
62b44f05
AR
11667 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11668 tcg_temp_free_i32(tmp);
11669 tcg_temp_free_i32(tmp2);
11670 goto illegal_op;
11671 }
9ee6e8bb 11672 if (op & 1)
5e3f878a
PB
11673 gen_swap_half(tmp2);
11674 gen_smul_dual(tmp, tmp2);
9ee6e8bb 11675 if (op & 0x10) {
5e3f878a 11676 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 11677 } else {
5e3f878a 11678 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 11679 }
7d1b0095 11680 tcg_temp_free_i32(tmp2);
a7812ae4
PB
11681 /* BUGFIX */
11682 tmp64 = tcg_temp_new_i64();
11683 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 11684 tcg_temp_free_i32(tmp);
a7812ae4
PB
11685 gen_addq(s, tmp64, rs, rd);
11686 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 11687 tcg_temp_free_i64(tmp64);
2c0262af 11688 } else {
9ee6e8bb
PB
11689 if (op & 0x20) {
11690 /* Unsigned 64-bit multiply */
a7812ae4 11691 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 11692 } else {
9ee6e8bb
PB
11693 if (op & 8) {
11694 /* smlalxy */
62b44f05
AR
11695 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11696 tcg_temp_free_i32(tmp2);
11697 tcg_temp_free_i32(tmp);
11698 goto illegal_op;
11699 }
5e3f878a 11700 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 11701 tcg_temp_free_i32(tmp2);
a7812ae4
PB
11702 tmp64 = tcg_temp_new_i64();
11703 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 11704 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11705 } else {
11706 /* Signed 64-bit multiply */
a7812ae4 11707 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 11708 }
b5ff1b31 11709 }
9ee6e8bb
PB
11710 if (op & 4) {
11711 /* umaal */
62b44f05
AR
11712 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11713 tcg_temp_free_i64(tmp64);
11714 goto illegal_op;
11715 }
a7812ae4
PB
11716 gen_addq_lo(s, tmp64, rs);
11717 gen_addq_lo(s, tmp64, rd);
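                    /*
                     * Illustrative note: UMAAL computes rdhi:rdlo =
                     * rn * rm + rdhi + rdlo, so the two halves are
                     * added in as independent 32-bit values
                     * (gen_addq_lo twice) rather than as a single
                     * 64-bit accumulator.
                     */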
9ee6e8bb
PB
11718 } else if (op & 0x40) {
11719 /* 64-bit accumulate. */
a7812ae4 11720 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 11721 }
a7812ae4 11722 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 11723 tcg_temp_free_i64(tmp64);
5fd46862 11724 }
2c0262af 11725 break;
9ee6e8bb
PB
11726 }
11727 break;
11728 case 6: case 7: case 14: case 15:
11729 /* Coprocessor. */
7517748e 11730 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8859ba3c
PM
11731 /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
11732 if (extract32(insn, 24, 2) == 3) {
11733 goto illegal_op; /* op0 = 0b11 : unallocated */
11734 }
11735
11736 /*
11737 * Decode VLLDM and VLSTM first: these are nonstandard because:
11738 * * if there is no FPU then these insns must NOP in
11739 * Secure state and UNDEF in Nonsecure state
11740 * * if there is an FPU then these insns do not have
11741 * the usual behaviour that disas_vfp_insn() provides of
11742 * being controlled by CPACR/NSACR enable bits or the
11743 * lazy-stacking logic.
7517748e 11744 */
b1e5336a
PM
11745 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
11746 (insn & 0xffa00f00) == 0xec200a00) {
11747 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
11748 * - VLLDM, VLSTM
11749 * We choose to UNDEF if the RAZ bits are non-zero.
11750 */
11751 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
11752 goto illegal_op;
11753 }
11754 /* Just NOP since FP support is not implemented */
11755 break;
11756 }
8859ba3c
PM
11757 if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
11758 ((insn >> 8) & 0xe) == 10) {
11759 /* FP, and the CPU supports it */
11760 if (disas_vfp_insn(s, insn)) {
11761 goto illegal_op;
11762 }
11763 break;
11764 }
11765
b1e5336a 11766 /* All other insns: NOCP */
7517748e
PM
11767 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
11768 default_exception_el(s));
11769 break;
11770 }
0052087e
RH
11771 if ((insn & 0xfe000a00) == 0xfc000800
11772 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11773 /* The Thumb2 and ARM encodings are identical. */
11774 if (disas_neon_insn_3same_ext(s, insn)) {
11775 goto illegal_op;
11776 }
11777 } else if ((insn & 0xff000a00) == 0xfe000800
11778 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11779 /* The Thumb2 and ARM encodings are identical. */
11780 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
11781 goto illegal_op;
11782 }
11783 } else if (((insn >> 24) & 3) == 3) {
9ee6e8bb 11784 /* Translate into the equivalent ARM encoding. */
f06053e3 11785 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 11786 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 11787 goto illegal_op;
7dcc1f89 11788 }
6a57f3eb 11789 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 11790 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
11791 goto illegal_op;
11792 }
9ee6e8bb
PB
11793 } else {
11794 if (insn & (1 << 28))
11795 goto illegal_op;
7dcc1f89 11796 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 11797 goto illegal_op;
7dcc1f89 11798 }
9ee6e8bb
PB
11799 }
11800 break;
11801 case 8: case 9: case 10: case 11:
11802 if (insn & (1 << 15)) {
11803 /* Branches, misc control. */
11804 if (insn & 0x5000) {
11805 /* Unconditional branch. */
11806 /* signextend(hw1[10:0]) -> offset[31:12]. */
11807 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
11808 /* hw1[10:0] -> offset[11:1]. */
11809 offset |= (insn & 0x7ff) << 1;
11810 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
11811 offset[24:22] already have the same value because of the
11812 sign extension above. */
11813 offset ^= ((~insn) & (1 << 13)) << 10;
11814 offset ^= ((~insn) & (1 << 11)) << 11;
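                /*
                 * Illustrative note: after the sign extension, offset
                 * bits [23:22] both hold S (insn bit 26); XORing them
                 * with the inverted J1 (bit 13) and J2 (bit 11) fields
                 * yields the architectural I1 = NOT(J1 EOR S) and
                 * I2 = NOT(J2 EOR S) of the T4 B/BL encoding.
                 */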
11815
9ee6e8bb
PB
11816 if (insn & (1 << 14)) {
11817 /* Branch and link. */
3174f8e9 11818 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 11819 }
3b46e624 11820
b0109805 11821 offset += s->pc;
9ee6e8bb
PB
11822 if (insn & (1 << 12)) {
11823 /* b/bl */
b0109805 11824 gen_jmp(s, offset);
9ee6e8bb
PB
11825 } else {
11826 /* blx */
b0109805 11827 offset &= ~(uint32_t)2;
be5e7a76 11828 /* thumb2 bx, no need to check */
b0109805 11829 gen_bx_im(s, offset);
2c0262af 11830 }
9ee6e8bb
PB
11831 } else if (((insn >> 23) & 7) == 7) {
11832 /* Misc control */
11833 if (insn & (1 << 13))
11834 goto illegal_op;
11835
11836 if (insn & (1 << 26)) {
001b3cab
PM
11837 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11838 goto illegal_op;
11839 }
37e6456e
PM
11840 if (!(insn & (1 << 20))) {
11841 /* Hypervisor call (v7) */
11842 int imm16 = extract32(insn, 16, 4) << 12
11843 | extract32(insn, 0, 12);
11844 ARCH(7);
11845 if (IS_USER(s)) {
11846 goto illegal_op;
11847 }
11848 gen_hvc(s, imm16);
11849 } else {
11850 /* Secure monitor call (v6+) */
11851 ARCH(6K);
11852 if (IS_USER(s)) {
11853 goto illegal_op;
11854 }
11855 gen_smc(s);
11856 }
2c0262af 11857 } else {
9ee6e8bb
PB
11858 op = (insn >> 20) & 7;
11859 switch (op) {
11860 case 0: /* msr cpsr. */
b53d8923 11861 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 11862 tmp = load_reg(s, rn);
b28b3377
PM
11863 /* the constant is the mask and SYSm fields */
11864 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 11865 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 11866 tcg_temp_free_i32(addr);
7d1b0095 11867 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11868 gen_lookup_tb(s);
11869 break;
11870 }
11871 /* fall through */
11872 case 1: /* msr spsr. */
b53d8923 11873 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11874 goto illegal_op;
b53d8923 11875 }
8bfd0550
PM
11876
11877 if (extract32(insn, 5, 1)) {
11878 /* MSR (banked) */
11879 int sysm = extract32(insn, 8, 4) |
11880 (extract32(insn, 4, 1) << 4);
11881 int r = op & 1;
11882
11883 gen_msr_banked(s, r, sysm, rm);
11884 break;
11885 }
11886
11887 /* MSR (for PSRs) */
2fbac54b
FN
11888 tmp = load_reg(s, rn);
11889 if (gen_set_psr(s,
7dcc1f89 11890 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 11891 op == 1, tmp))
9ee6e8bb
PB
11892 goto illegal_op;
11893 break;
11894 case 2: /* cps, nop-hint. */
11895 if (((insn >> 8) & 7) == 0) {
11896 gen_nop_hint(s, insn & 0xff);
11897 }
11898 /* Implemented as NOP in user mode. */
11899 if (IS_USER(s))
11900 break;
11901 offset = 0;
11902 imm = 0;
11903 if (insn & (1 << 10)) {
11904 if (insn & (1 << 7))
11905 offset |= CPSR_A;
11906 if (insn & (1 << 6))
11907 offset |= CPSR_I;
11908 if (insn & (1 << 5))
11909 offset |= CPSR_F;
11910 if (insn & (1 << 9))
11911 imm = CPSR_A | CPSR_I | CPSR_F;
11912 }
11913 if (insn & (1 << 8)) {
11914 offset |= 0x1f;
11915 imm |= (insn & 0x1f);
11916 }
11917 if (offset) {
2fbac54b 11918 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
11919 }
11920 break;
11921 case 3: /* Special control operations. */
14120108 11922 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
8297cb13 11923 !arm_dc_feature(s, ARM_FEATURE_M)) {
14120108
JS
11924 goto illegal_op;
11925 }
9ee6e8bb
PB
11926 op = (insn >> 4) & 0xf;
11927 switch (op) {
11928 case 2: /* clrex */
426f5abc 11929 gen_clrex(s);
9ee6e8bb
PB
11930 break;
11931 case 4: /* dsb */
11932 case 5: /* dmb */
61e4c432 11933 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 11934 break;
6df99dec
SS
11935 case 6: /* isb */
11936 /* We need to break the TB after this insn
11937 * to execute self-modifying code correctly
11938 * and also to take any pending interrupts
11939 * immediately.
11940 */
0b609cc1 11941 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 11942 break;
9888bd1e
RH
11943 case 7: /* sb */
11944 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
11945 goto illegal_op;
11946 }
11947 /*
11948 * TODO: There is no speculation barrier opcode
11949 * for TCG; MB and end the TB instead.
11950 */
11951 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
11952 gen_goto_tb(s, 0, s->pc & ~1);
11953 break;
9ee6e8bb
PB
11954 default:
11955 goto illegal_op;
11956 }
11957 break;
11958 case 4: /* bxj */
9d7c59c8
PM
11959 /* Trivial implementation equivalent to bx.
11960 * This instruction doesn't exist at all for M-profile.
11961 */
11962 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11963 goto illegal_op;
11964 }
d9ba4830
PB
11965 tmp = load_reg(s, rn);
11966 gen_bx(s, tmp);
9ee6e8bb
PB
11967 break;
11968 case 5: /* Exception return. */
b8b45b68
RV
11969 if (IS_USER(s)) {
11970 goto illegal_op;
11971 }
11972 if (rn != 14 || rd != 15) {
11973 goto illegal_op;
11974 }
55c544ed
PM
11975 if (s->current_el == 2) {
11976 /* ERET from Hyp uses ELR_Hyp, not LR */
11977 if (insn & 0xff) {
11978 goto illegal_op;
11979 }
11980 tmp = load_cpu_field(elr_el[2]);
11981 } else {
11982 tmp = load_reg(s, rn);
11983 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
11984 }
b8b45b68
RV
11985 gen_exception_return(s, tmp);
11986 break;
8bfd0550 11987 case 6: /* MRS */
43ac6574
PM
11988 if (extract32(insn, 5, 1) &&
11989 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
11990 /* MRS (banked) */
11991 int sysm = extract32(insn, 16, 4) |
11992 (extract32(insn, 4, 1) << 4);
11993
11994 gen_mrs_banked(s, 0, sysm, rd);
11995 break;
11996 }
11997
3d54026f
PM
11998 if (extract32(insn, 16, 4) != 0xf) {
11999 goto illegal_op;
12000 }
12001 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
12002 extract32(insn, 0, 8) != 0) {
12003 goto illegal_op;
12004 }
12005
8bfd0550 12006 /* mrs cpsr */
7d1b0095 12007 tmp = tcg_temp_new_i32();
b53d8923 12008 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
12009 addr = tcg_const_i32(insn & 0xff);
12010 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 12011 tcg_temp_free_i32(addr);
9ee6e8bb 12012 } else {
9ef39277 12013 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 12014 }
8984bd2e 12015 store_reg(s, rd, tmp);
9ee6e8bb 12016 break;
8bfd0550 12017 case 7: /* MRS */
43ac6574
PM
12018 if (extract32(insn, 5, 1) &&
12019 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
12020 /* MRS (banked) */
12021 int sysm = extract32(insn, 16, 4) |
12022 (extract32(insn, 4, 1) << 4);
12023
12024 gen_mrs_banked(s, 1, sysm, rd);
12025 break;
12026 }
12027
12028 /* mrs spsr. */
9ee6e8bb 12029 /* Not accessible in user mode. */
b53d8923 12030 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 12031 goto illegal_op;
b53d8923 12032 }
3d54026f
PM
12033
12034 if (extract32(insn, 16, 4) != 0xf ||
12035 extract32(insn, 0, 8) != 0) {
12036 goto illegal_op;
12037 }
12038
d9ba4830
PB
12039 tmp = load_cpu_field(spsr);
12040 store_reg(s, rd, tmp);
9ee6e8bb 12041 break;
2c0262af
FB
12042 }
12043 }
9ee6e8bb
PB
12044 } else {
12045 /* Conditional branch. */
12046 op = (insn >> 22) & 0xf;
12047 /* Generate a conditional jump to next instruction. */
c2d9644e 12048 arm_skip_unless(s, op);
9ee6e8bb
PB
12049
12050 /* offset[11:1] = insn[10:0] */
12051 offset = (insn & 0x7ff) << 1;
12052 /* offset[17:12] = insn[21:16]. */
12053 offset |= (insn & 0x003f0000) >> 4;
12054 /* offset[31:20] = insn[26]. */
12055 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
12056 /* offset[18] = insn[13]. */
12057 offset |= (insn & (1 << 13)) << 5;
12058 /* offset[19] = insn[11]. */
12059 offset |= (insn & (1 << 11)) << 8;
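            /*
             * Illustrative note: unlike the unconditional (T4) form
             * above, the conditional T3 encoding uses J1 and J2
             * directly as offset bits 18 and 19 with no XOR against S,
             * giving a +/-1MB range rather than +/-16MB.
             */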
12060
12061 /* jump to the offset */
b0109805 12062 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
12063 }
12064 } else {
55203189
PM
12065 /*
12066 * 0b1111_0xxx_xxxx_xxxx_0xxx_xxxx_xxxx_xxxx
12067 * - Data-processing (modified immediate, plain binary immediate)
12068 */
9ee6e8bb 12069 if (insn & (1 << 25)) {
55203189
PM
12070 /*
12071 * 0b1111_0x1x_xxxx_xxxx_0xxx_xxxx_xxxx_xxxx
12072 * - Data-processing (plain binary immediate)
12073 */
9ee6e8bb
PB
12074 if (insn & (1 << 24)) {
12075 if (insn & (1 << 20))
12076 goto illegal_op;
12077 /* Bitfield/Saturate. */
12078 op = (insn >> 21) & 7;
12079 imm = insn & 0x1f;
12080 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 12081 if (rn == 15) {
7d1b0095 12082 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
12083 tcg_gen_movi_i32(tmp, 0);
12084 } else {
12085 tmp = load_reg(s, rn);
12086 }
9ee6e8bb
PB
12087 switch (op) {
12088 case 2: /* Signed bitfield extract. */
12089 imm++;
12090 if (shift + imm > 32)
12091 goto illegal_op;
59a71b4c
RH
12092 if (imm < 32) {
12093 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
12094 }
9ee6e8bb
PB
12095 break;
12096 case 6: /* Unsigned bitfield extract. */
12097 imm++;
12098 if (shift + imm > 32)
12099 goto illegal_op;
59a71b4c
RH
12100 if (imm < 32) {
12101 tcg_gen_extract_i32(tmp, tmp, shift, imm);
12102 }
9ee6e8bb
PB
12103 break;
12104 case 3: /* Bitfield insert/clear. */
12105 if (imm < shift)
12106 goto illegal_op;
12107 imm = imm + 1 - shift;
12108 if (imm != 32) {
6ddbc6e4 12109 tmp2 = load_reg(s, rd);
d593c48e 12110 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 12111 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
12112 }
12113 break;
12114 case 7:
12115 goto illegal_op;
12116 default: /* Saturate. */
9ee6e8bb
PB
12117 if (shift) {
12118 if (op & 1)
6ddbc6e4 12119 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 12120 else
6ddbc6e4 12121 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 12122 }
6ddbc6e4 12123 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
12124 if (op & 4) {
12125 /* Unsigned. */
62b44f05
AR
12126 if ((op & 1) && shift == 0) {
12127 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
12128 tcg_temp_free_i32(tmp);
12129 tcg_temp_free_i32(tmp2);
12130 goto illegal_op;
12131 }
9ef39277 12132 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 12133 } else {
9ef39277 12134 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 12135 }
2c0262af 12136 } else {
9ee6e8bb 12137 /* Signed. */
62b44f05
AR
12138 if ((op & 1) && shift == 0) {
12139 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
12140 tcg_temp_free_i32(tmp);
12141 tcg_temp_free_i32(tmp2);
12142 goto illegal_op;
12143 }
9ef39277 12144 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 12145 } else {
9ef39277 12146 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 12147 }
2c0262af 12148 }
b75263d6 12149 tcg_temp_free_i32(tmp2);
9ee6e8bb 12150 break;
2c0262af 12151 }
6ddbc6e4 12152 store_reg(s, rd, tmp);
9ee6e8bb
PB
12153 } else {
12154 imm = ((insn & 0x04000000) >> 15)
12155 | ((insn & 0x7000) >> 4) | (insn & 0xff);
12156 if (insn & (1 << 22)) {
12157 /* 16-bit immediate. */
12158 imm |= (insn >> 4) & 0xf000;
12159 if (insn & (1 << 23)) {
12160 /* movt */
5e3f878a 12161 tmp = load_reg(s, rd);
86831435 12162 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 12163 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 12164 } else {
9ee6e8bb 12165 /* movw */
7d1b0095 12166 tmp = tcg_temp_new_i32();
5e3f878a 12167 tcg_gen_movi_i32(tmp, imm);
2c0262af 12168 }
55203189 12169 store_reg(s, rd, tmp);
2c0262af 12170 } else {
9ee6e8bb
PB
12171 /* Add/sub 12-bit immediate. */
12172 if (rn == 15) {
b0109805 12173 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 12174 if (insn & (1 << 23))
b0109805 12175 offset -= imm;
9ee6e8bb 12176 else
b0109805 12177 offset += imm;
7d1b0095 12178 tmp = tcg_temp_new_i32();
5e3f878a 12179 tcg_gen_movi_i32(tmp, offset);
55203189 12180 store_reg(s, rd, tmp);
2c0262af 12181 } else {
5e3f878a 12182 tmp = load_reg(s, rn);
9ee6e8bb 12183 if (insn & (1 << 23))
5e3f878a 12184 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 12185 else
5e3f878a 12186 tcg_gen_addi_i32(tmp, tmp, imm);
55203189
PM
12187 if (rn == 13 && rd == 13) {
12188 /* ADD SP, SP, imm or SUB SP, SP, imm */
12189 store_sp_checked(s, tmp);
12190 } else {
12191 store_reg(s, rd, tmp);
12192 }
2c0262af 12193 }
9ee6e8bb 12194 }
191abaa2 12195 }
9ee6e8bb 12196 } else {
55203189
PM
12197 /*
12198 * 0b1111_0x0x_xxxx_xxxx_0xxx_xxxx_xxxx_xxxx
12199 * - Data-processing (modified immediate)
12200 */
9ee6e8bb
PB
12201 int shifter_out = 0;
12202 /* modified 12-bit immediate. */
12203 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
12204 imm = (insn & 0xff);
12205 switch (shift) {
12206 case 0: /* XY */
12207 /* Nothing to do. */
12208 break;
12209 case 1: /* 00XY00XY */
12210 imm |= imm << 16;
12211 break;
12212 case 2: /* XY00XY00 */
12213 imm |= imm << 16;
12214 imm <<= 8;
12215 break;
12216 case 3: /* XYXYXYXY */
12217 imm |= imm << 16;
12218 imm |= imm << 8;
12219 break;
12220 default: /* Rotated constant. */
12221 shift = (shift << 1) | (imm >> 7);
12222 imm |= 0x80;
12223 imm = imm << (32 - shift);
12224 shifter_out = 1;
12225 break;
b5ff1b31 12226 }
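            /*
             * Worked example (illustrative): imm8 = 0x12 expands to
             * 0x00120012 for shift == 1 and 0x12121212 for shift == 3.
             * For shift >= 4 the constant is 0b1bcdefgh rotated right
             * by the 5-bit amount i:imm3:imm8<7>; e.g. a 4-bit field
             * of 5 with imm8 = 0x6d gives ROR(0xed, 10) = 0x3b400000,
             * and since the rotation is at least 8 the left shift by
             * (32 - shift) computes it exactly.
             */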
7d1b0095 12227 tmp2 = tcg_temp_new_i32();
3174f8e9 12228 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 12229 rn = (insn >> 16) & 0xf;
3174f8e9 12230 if (rn == 15) {
7d1b0095 12231 tmp = tcg_temp_new_i32();
3174f8e9
FN
12232 tcg_gen_movi_i32(tmp, 0);
12233 } else {
12234 tmp = load_reg(s, rn);
12235 }
9ee6e8bb
PB
12236 op = (insn >> 21) & 0xf;
12237 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 12238 shifter_out, tmp, tmp2))
9ee6e8bb 12239 goto illegal_op;
7d1b0095 12240 tcg_temp_free_i32(tmp2);
9ee6e8bb 12241 rd = (insn >> 8) & 0xf;
55203189
PM
12242 if (rd == 13 && rn == 13
12243 && (op == 8 || op == 13)) {
12244 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
12245 store_sp_checked(s, tmp);
12246 } else if (rd != 15) {
3174f8e9
FN
12247 store_reg(s, rd, tmp);
12248 } else {
7d1b0095 12249 tcg_temp_free_i32(tmp);
2c0262af 12250 }
2c0262af 12251 }
9ee6e8bb
PB
12252 }
12253 break;
12254 case 12: /* Load/store single data item. */
12255 {
12256 int postinc = 0;
12257 int writeback = 0;
a99caa48 12258 int memidx;
9bb6558a
PM
12259 ISSInfo issinfo;
12260
9ee6e8bb 12261 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 12262 if (disas_neon_ls_insn(s, insn)) {
c1713132 12263 goto illegal_op;
7dcc1f89 12264 }
9ee6e8bb
PB
12265 break;
12266 }
a2fdc890
PM
12267 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
12268 if (rs == 15) {
12269 if (!(insn & (1 << 20))) {
12270 goto illegal_op;
12271 }
12272 if (op != 2) {
12273 /* Byte or halfword load space with dest == r15 : memory hints.
12274 * Catch them early so we don't emit pointless addressing code.
12275 * This space is a mix of:
12276 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
12277 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
12278 * cores)
12279 * unallocated hints, which must be treated as NOPs
12280 * UNPREDICTABLE space, which we NOP or UNDEF depending on
12281 * which is easiest for the decoding logic
12282 * Some space which must UNDEF
12283 */
12284 int op1 = (insn >> 23) & 3;
12285 int op2 = (insn >> 6) & 0x3f;
12286 if (op & 2) {
12287 goto illegal_op;
12288 }
12289 if (rn == 15) {
02afbf64
PM
12290 /* UNPREDICTABLE, unallocated hint or
12291 * PLD/PLDW/PLI (literal)
12292 */
2eea841c 12293 return;
a2fdc890
PM
12294 }
12295 if (op1 & 1) {
2eea841c 12296 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
12297 }
12298 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 12299 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
12300 }
12301 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 12302 goto illegal_op;
a2fdc890
PM
12303 }
12304 }
a99caa48 12305 memidx = get_mem_index(s);
9ee6e8bb 12306 if (rn == 15) {
7d1b0095 12307 addr = tcg_temp_new_i32();
9ee6e8bb
PB
12308 /* PC relative. */
12309 /* s->pc has already been incremented by 4. */
12310 imm = s->pc & 0xfffffffc;
12311 if (insn & (1 << 23))
12312 imm += insn & 0xfff;
12313 else
12314 imm -= insn & 0xfff;
b0109805 12315 tcg_gen_movi_i32(addr, imm);
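            /*
             * Illustrative note: this is the literal form, e.g.
             * "ldr r0, [pc, #imm]"; the base is the insn's PC + 4
             * aligned down to a word boundary, as T32 literal
             * addressing requires.
             */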
9ee6e8bb 12316 } else {
b0109805 12317 addr = load_reg(s, rn);
9ee6e8bb
PB
12318 if (insn & (1 << 23)) {
12319 /* Positive offset. */
12320 imm = insn & 0xfff;
b0109805 12321 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 12322 } else {
9ee6e8bb 12323 imm = insn & 0xff;
2a0308c5
PM
12324 switch ((insn >> 8) & 0xf) {
12325 case 0x0: /* Shifted Register. */
9ee6e8bb 12326 shift = (insn >> 4) & 0xf;
2a0308c5
PM
12327 if (shift > 3) {
12328 tcg_temp_free_i32(addr);
18c9b560 12329 goto illegal_op;
2a0308c5 12330 }
b26eefb6 12331 tmp = load_reg(s, rm);
9ee6e8bb 12332 if (shift)
b26eefb6 12333 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 12334 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 12335 tcg_temp_free_i32(tmp);
9ee6e8bb 12336 break;
2a0308c5 12337 case 0xc: /* Negative offset. */
b0109805 12338 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 12339 break;
2a0308c5 12340 case 0xe: /* User privilege. */
b0109805 12341 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 12342 memidx = get_a32_user_mem_index(s);
9ee6e8bb 12343 break;
2a0308c5 12344 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
12345 imm = -imm;
12346 /* Fall through. */
2a0308c5 12347 case 0xb: /* Post-increment. */
9ee6e8bb
PB
12348 postinc = 1;
12349 writeback = 1;
12350 break;
2a0308c5 12351 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
12352 imm = -imm;
12353 /* Fall through. */
2a0308c5 12354 case 0xf: /* Pre-increment. */
9ee6e8bb
PB
12355 writeback = 1;
12356 break;
12357 default:
2a0308c5 12358 tcg_temp_free_i32(addr);
b7bcbe95 12359 goto illegal_op;
9ee6e8bb
PB
12360 }
12361 }
12362 }
9bb6558a
PM
12363
12364 issinfo = writeback ? ISSInvalid : rs;
12365
0bc003ba
PM
12366 if (s->v8m_stackcheck && rn == 13 && writeback) {
12367 /*
12368 * Stackcheck. Here we know 'addr' is the current SP;
12369 * if imm is +ve we're moving SP up, else down. It is
12370 * UNKNOWN whether the limit check triggers when SP starts
12371 * below the limit and ends up above it; we choose to trigger the check in that case.
12372 */
12373 if ((int32_t)imm < 0) {
12374 TCGv_i32 newsp = tcg_temp_new_i32();
12375
12376 tcg_gen_addi_i32(newsp, addr, imm);
12377 gen_helper_v8m_stackcheck(cpu_env, newsp);
12378 tcg_temp_free_i32(newsp);
12379 } else {
12380 gen_helper_v8m_stackcheck(cpu_env, addr);
12381 }
12382 }
12383
12384 if (writeback && !postinc) {
12385 tcg_gen_addi_i32(addr, addr, imm);
12386 }
12387
9ee6e8bb
PB
12388 if (insn & (1 << 20)) {
12389 /* Load. */
5a839c0d 12390 tmp = tcg_temp_new_i32();
a2fdc890 12391 switch (op) {
5a839c0d 12392 case 0:
9bb6558a 12393 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12394 break;
12395 case 4:
9bb6558a 12396 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12397 break;
12398 case 1:
9bb6558a 12399 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12400 break;
12401 case 5:
9bb6558a 12402 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12403 break;
12404 case 2:
9bb6558a 12405 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 12406 break;
2a0308c5 12407 default:
5a839c0d 12408 tcg_temp_free_i32(tmp);
2a0308c5
PM
12409 tcg_temp_free_i32(addr);
12410 goto illegal_op;
a2fdc890
PM
12411 }
12412 if (rs == 15) {
3bb8a96f 12413 gen_bx_excret(s, tmp);
9ee6e8bb 12414 } else {
a2fdc890 12415 store_reg(s, rs, tmp);
9ee6e8bb
PB
12416 }
12417 } else {
12418 /* Store. */
b0109805 12419 tmp = load_reg(s, rs);
9ee6e8bb 12420 switch (op) {
5a839c0d 12421 case 0:
9bb6558a 12422 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12423 break;
12424 case 1:
9bb6558a 12425 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12426 break;
12427 case 2:
9bb6558a 12428 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 12429 break;
2a0308c5 12430 default:
5a839c0d 12431 tcg_temp_free_i32(tmp);
2a0308c5
PM
12432 tcg_temp_free_i32(addr);
12433 goto illegal_op;
b7bcbe95 12434 }
5a839c0d 12435 tcg_temp_free_i32(tmp);
2c0262af 12436 }
9ee6e8bb 12437 if (postinc)
b0109805
PB
12438 tcg_gen_addi_i32(addr, addr, imm);
12439 if (writeback) {
12440 store_reg(s, rn, addr);
12441 } else {
7d1b0095 12442 tcg_temp_free_i32(addr);
b0109805 12443 }
9ee6e8bb
PB
12444 }
12445 break;
12446 default:
12447 goto illegal_op;
2c0262af 12448 }
2eea841c 12449 return;
9ee6e8bb 12450illegal_op:
2eea841c
PM
12451 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
12452 default_exception_el(s));
2c0262af
FB
12453}
12454
296e5a0a 12455static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 12456{
296e5a0a 12457 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
12458 int32_t offset;
12459 int i;
39d5492a
PM
12460 TCGv_i32 tmp;
12461 TCGv_i32 tmp2;
12462 TCGv_i32 addr;
99c475ab 12463
99c475ab
FB
12464 switch (insn >> 12) {
12465 case 0: case 1:
396e467c 12466
99c475ab
FB
12467 rd = insn & 7;
12468 op = (insn >> 11) & 3;
12469 if (op == 3) {
a2d12f0f
PM
12470 /*
12471 * 0b0001_1xxx_xxxx_xxxx
12472 * - Add, subtract (three low registers)
12473 * - Add, subtract (two low registers and immediate)
12474 */
99c475ab 12475 rn = (insn >> 3) & 7;
396e467c 12476 tmp = load_reg(s, rn);
99c475ab
FB
12477 if (insn & (1 << 10)) {
12478 /* immediate */
7d1b0095 12479 tmp2 = tcg_temp_new_i32();
396e467c 12480 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
12481 } else {
12482 /* reg */
12483 rm = (insn >> 6) & 7;
396e467c 12484 tmp2 = load_reg(s, rm);
99c475ab 12485 }
9ee6e8bb
PB
12486 if (insn & (1 << 9)) {
12487 if (s->condexec_mask)
396e467c 12488 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 12489 else
72485ec4 12490 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
12491 } else {
12492 if (s->condexec_mask)
396e467c 12493 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 12494 else
72485ec4 12495 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 12496 }
7d1b0095 12497 tcg_temp_free_i32(tmp2);
396e467c 12498 store_reg(s, rd, tmp);
99c475ab
FB
12499 } else {
12500 /* shift immediate */
12501 rm = (insn >> 3) & 7;
12502 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
12503 tmp = load_reg(s, rm);
12504 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
12505 if (!s->condexec_mask)
12506 gen_logic_CC(tmp);
12507 store_reg(s, rd, tmp);
99c475ab
FB
12508 }
12509 break;
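    /*
     * Illustrative note: ops 0-2 above are LSL/LSR/ASR (immediate) and
     * op == 3 is the three-register / immediate add-subtract format;
     * in an IT block (condexec_mask nonzero) flag setting is
     * suppressed, since these 16-bit encodings set flags only
     * outside IT.
     */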
12510 case 2: case 3:
a2d12f0f
PM
12511 /*
12512 * 0b001x_xxxx_xxxx_xxxx
12513 * - Add, subtract, compare, move (one low register and immediate)
12514 */
99c475ab
FB
12515 op = (insn >> 11) & 3;
12516 rd = (insn >> 8) & 0x7;
396e467c 12517 if (op == 0) { /* mov */
7d1b0095 12518 tmp = tcg_temp_new_i32();
396e467c 12519 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 12520 if (!s->condexec_mask)
396e467c
FN
12521 gen_logic_CC(tmp);
12522 store_reg(s, rd, tmp);
12523 } else {
12524 tmp = load_reg(s, rd);
7d1b0095 12525 tmp2 = tcg_temp_new_i32();
396e467c
FN
12526 tcg_gen_movi_i32(tmp2, insn & 0xff);
12527 switch (op) {
12528 case 1: /* cmp */
72485ec4 12529 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
12530 tcg_temp_free_i32(tmp);
12531 tcg_temp_free_i32(tmp2);
396e467c
FN
12532 break;
12533 case 2: /* add */
12534 if (s->condexec_mask)
12535 tcg_gen_add_i32(tmp, tmp, tmp2);
12536 else
72485ec4 12537 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 12538 tcg_temp_free_i32(tmp2);
396e467c
FN
12539 store_reg(s, rd, tmp);
12540 break;
12541 case 3: /* sub */
12542 if (s->condexec_mask)
12543 tcg_gen_sub_i32(tmp, tmp, tmp2);
12544 else
72485ec4 12545 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 12546 tcg_temp_free_i32(tmp2);
396e467c
FN
12547 store_reg(s, rd, tmp);
12548 break;
12549 }
99c475ab 12550 }
99c475ab
FB
12551 break;
12552 case 4:
12553 if (insn & (1 << 11)) {
12554 rd = (insn >> 8) & 7;
5899f386
FB
12555 /* load pc-relative. Bit 1 of PC is ignored. */
12556 val = s->pc + 2 + ((insn & 0xff) * 4);
12557 val &= ~(uint32_t)2;
7d1b0095 12558 addr = tcg_temp_new_i32();
b0109805 12559 tcg_gen_movi_i32(addr, val);
c40c8556 12560 tmp = tcg_temp_new_i32();
9bb6558a
PM
12561 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
12562 rd | ISSIs16Bit);
7d1b0095 12563 tcg_temp_free_i32(addr);
b0109805 12564 store_reg(s, rd, tmp);
99c475ab
FB
12565 break;
12566 }
12567 if (insn & (1 << 10)) {
ebfe27c5
PM
12568 /* 0b0100_01xx_xxxx_xxxx
12569 * - data processing extended, branch and exchange
12570 */
99c475ab
FB
12571 rd = (insn & 7) | ((insn >> 4) & 8);
12572 rm = (insn >> 3) & 0xf;
12573 op = (insn >> 8) & 3;
12574 switch (op) {
12575 case 0: /* add */
396e467c
FN
12576 tmp = load_reg(s, rd);
12577 tmp2 = load_reg(s, rm);
12578 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 12579 tcg_temp_free_i32(tmp2);
55203189
PM
12580 if (rd == 13) {
12581 /* ADD SP, SP, reg */
12582 store_sp_checked(s, tmp);
12583 } else {
12584 store_reg(s, rd, tmp);
12585 }
99c475ab
FB
12586 break;
12587 case 1: /* cmp */
396e467c
FN
12588 tmp = load_reg(s, rd);
12589 tmp2 = load_reg(s, rm);
72485ec4 12590 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
12591 tcg_temp_free_i32(tmp2);
12592 tcg_temp_free_i32(tmp);
99c475ab
FB
12593 break;
12594 case 2: /* mov/cpy */
396e467c 12595 tmp = load_reg(s, rm);
55203189
PM
12596 if (rd == 13) {
12597 /* MOV SP, reg */
12598 store_sp_checked(s, tmp);
12599 } else {
12600 store_reg(s, rd, tmp);
12601 }
99c475ab 12602 break;
            case 3:
            {
                /* 0b0100_0111_xxxx_xxxx
                 * - branch [and link] exchange thumb register
                 */
                bool link = insn & (1 << 7);

                if (insn & 3) {
                    goto undef;
                }
                if (link) {
                    ARCH(5);
                }
                if ((insn & 4)) {
                    /* BXNS/BLXNS: only exists for v8M with the
                     * security extensions, and always UNDEF if NonSecure.
                     * We don't implement these in the user-only mode
                     * either (in theory you can use them from Secure User
                     * mode but they are too tied in to system emulation.)
                     */
                    if (!s->v8m_secure || IS_USER_ONLY) {
                        goto undef;
                    }
                    if (link) {
                        gen_blxns(s, rm);
                    } else {
                        gen_bxns(s, rm);
                    }
                    break;
                }
                /* BLX/BX */
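                /*
                 * For the link case the saved return address has bit 0
                 * set, so that the callee's eventual BX LR returns to
                 * Thumb state.
                 */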
                tmp = load_reg(s, rm);
                if (link) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                    gen_bx(s, tmp);
                } else {
                    /* Only BX works as exception-return, not BLX */
                    gen_bx_excret(s, tmp);
                }
                break;
            }
            }
            break;
        }

        /*
         * 0b0100_00xx_xxxx_xxxx
         * - Data-processing (two low registers)
         */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }
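        /*
         * After the swap, tmp below holds the shift amount (loaded from
         * the original rd) and tmp2 the value to shift; val == 1 records
         * that the result lives in tmp2 and must be written back via the
         * swapped register on the store path at the end of this case.
         */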

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            tmp = NULL;
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
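        /*
         * rd == 16 is a sentinel meaning "no register result" (tst, cmp
         * and cmn above); in that case both temporaries are simply freed.
         */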
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;

    case 5:
        /* load/store register offset. */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
        } else {
            tmp = tcg_temp_new_i32();
        }

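        /*
         * The _iss accessor forms pass ISS (instruction specific
         * syndrome) details so that a data abort can report the transfer
         * register; ISSIs16Bit marks this as a 16-bit (Thumb) encoding.
         */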
        switch (op) {
        case 0: /* str */
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 1: /* strh */
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 2: /* strb */
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 3: /* ldrsb */
            gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 4: /* ldr */
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 5: /* ldrh */
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 6: /* ldrb */
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 7: /* ldrsh */
            gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        }
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
        } else {
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
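        /* The line above scales imm5 (insn[10:6]) by the word size:
         * ((insn >> 6) & 0x1f) * 4 == (insn >> 4) & 0x7c.
         */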
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /*
         * 0b1010_xxxx_xxxx_xxxx
         * - Add PC/SP (immediate)
         */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored. */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /*
             * 0b1011_0000_xxxx_xxxx
             * - ADD (SP plus immediate)
             * - SUB (SP minus immediate)
             */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_sp_checked(s, tmp);
            break;

        case 2: /* sign/zero extend. */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /*
             * 0b1011_x10x_xxxx_xxxx
             * - push/pop
             */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
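            /*
             * offset totals 4 bytes per register in the list, plus 4 more
             * when bit 8 adds LR (push) or PC (pop); for a push (bit 11
             * clear) SP is pre-decremented by that total before the
             * stores below.
             */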

            if (s->v8m_stackcheck) {
                /*
                 * Here 'addr' is the lower of "old SP" and "new SP";
                 * if this is a pop that starts below the limit and ends
                 * above it, it is UNKNOWN whether the limit check triggers;
                 * we choose to trigger.
                 */
                gen_helper_v8m_stackcheck(cpu_env, addr);
            }

            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address. */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            tmp = NULL;
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
            }
            break;

        case 1: case 3: case 9: case 11: /* cbz/cbnz */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            arm_gen_condlabel(s);
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
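            /*
             * Note the inverted test above: the brcond jumps to
             * s->condlabel (the fall-through path, resolved later in
             * arm_post_translate_insn) when the branch is *not* taken;
             * the code below emits the taken jump to PC + (i:imm5:'0').
             */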
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint. */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then. */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No code is generated for this insn; it only sets up state. */
            break;

        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
            break;
        }

        case 0xa: /* rev, and hlt */
        {
            int op1 = extract32(insn, 6, 2);

            if (op1 == 2) {
                /* HLT */
                int imm6 = extract32(insn, 0, 6);

                gen_hlt(s, imm6);
                break;
            }

            /* Otherwise this is rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch (op1) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default:
                g_assert_not_reached();
            }
            store_reg(s, rd, tmp);
            break;
        }

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
                    gen_helper_setend(cpu_env);
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
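                /*
                 * The helper (and the TB end via DISAS_UPDATE) is only
                 * needed when SETEND actually flips the data endianness;
                 * a SETEND matching the current state is a no-op.
                 */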
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var = NULL;
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
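        /*
         * loaded_var defers the case where the base register rn is also
         * in an LDMIA register list: its loaded value is parked so the
         * address arithmetic keeps using the original base, and it is
         * only committed once the loop has finished.
         */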
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->base.is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        arm_skip_unless(s, cond);

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
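        /* The shift pair above sign-extends imm8; the branch target is
         * then PC + SignExtend(imm8:'0').
         */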
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            /* thumb_insn_is_16bit() ensures we can't get here for
             * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
             * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
             */
            assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
            ARCH(5);
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        /* thumb_insn_is_16bit() ensures we can't get here for
         * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
         */
        assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));

        if (insn & (1 << 11)) {
            /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
        } else {
            /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
            uint32_t uoffset = ((int32_t)insn << 21) >> 9;

            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
        }
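        /*
         * The pre-Thumb2 split BL/BLX works in two halves: the prefix
         * parks PC + SignExtend(imm11 << 12) in LR, then the suffix adds
         * its own imm11 << 1, writes the real return address (s->pc | 1)
         * back to LR, and branches.
         */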
        break;
    }
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->pc might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * We know this is a Thumb insn, and our caller ensures we are
     * only called if dc->pc is less than 4 bytes from the page
     * boundary, so we cross the page if the first 16 bits indicate
     * that this is a 32 bit insn.
     */
    uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);

    return !thumb_insn_is_16bit(s, insn);
}

static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    uint32_t condexec, core_mmu_idx;

    dc->isar = &cpu->isar;
    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
    dc->condexec_mask = (condexec & 0xf) << 1;
    dc->condexec_cond = condexec >> 4;
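    /*
     * Note the split storage of the 8-bit IT state: a 4-bit base
     * condition plus a mask held pre-shifted left by one;
     * arm_tr_insn_start() below recombines them when recording
     * per-insn state.
     */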
    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
    dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
    dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
        dc->vec_stride = 0;
    } else {
        dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
        dc->c15_cpar = 0;
    }
    dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1. */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA. Bound the number of insns to execute
       to those left on the page. */
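    /*
     * The arithmetic below: pc_first | TARGET_PAGE_MASK sets every bit
     * above the page offset, so its negation is the byte count left on
     * the page. E.g. with 4K pages and pc_first = 0x8ff8, the bound is
     * -(0xfffffff8) / 4 = 8 / 4 = 2 instructions.
     */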
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }

    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();
}

static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}

static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->pc += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}

static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page. */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block. */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}

static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}

static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
    dc->insn = insn;
    dc->pc += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA. We performed the cross-page check
       in init_disas_context by adjusting max_insns. */
}

static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /* Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /* HLT: v8A only. This is unconditional even when it is going to
         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
         * For v7 cores this was a plain old undefined encoding and so
         * honours its cc check. (We might be using the encoding as
         * a semihosting trap, but we don't change the cc check behaviour
         * on that account, because a debugger connected to a real v7A
         * core and emulating semihosting traps by catching the UNDEF
         * exception would also only see cases where the cc check passed.
         * No guest code should be trying to do a HLT semihosting trap
         * in an IT block anyway.
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}

static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition. */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }
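    /*
     * The advance above shifts the next mask bit into the low bit of
     * the base condition; once the mask underflows to zero the IT
     * block is finished and both fields are cleared.
     */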

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA. Stop translation when the next insn
     * will touch a new page. This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
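        /*
         * A note on the constant passed to the WFI helper above: it is
         * the insn length (2 for a 16-bit Thumb encoding, else 4), which
         * the helper can use to adjust the PC should the WFI end up
         * trapped to a higher exception level rather than executed.
         */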
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align dc->base.pc_next */
    dc->base.pc_next = dc->pc;
}

static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    const TranslatorOps *ops = &arm_translator_ops;

    if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb, max_insns);
}

void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            qemu_fprintf(f, "\n");
        else
            qemu_fprintf(f, " ");
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                     xpsr,
                     xpsr & XPSR_N ? 'N' : '-',
                     xpsr & XPSR_Z ? 'Z' : '-',
                     xpsr & XPSR_C ? 'C' : '-',
                     xpsr & XPSR_V ? 'V' : '-',
                     xpsr & XPSR_T ? 'T' : 'A',
                     ns_status,
                     mode);
    } else {
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                     psr,
                     psr & CPSR_N ? 'N' : '-',
                     psr & CPSR_Z ? 'Z' : '-',
                     psr & CPSR_C ? 'C' : '-',
                     psr & CPSR_V ? 'V' : '-',
                     psr & CPSR_T ? 'T' : 'A',
                     ns_status,
                     aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                         i * 2, (uint32_t)v,
                         i * 2 + 1, (uint32_t)(v >> 32),
                         i, v);
        }
        qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
    }
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}
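
/*
 * For cross-reference: the data[] slots consumed here mirror what
 * arm_tr_insn_start() recorded via tcg_gen_insn_start(): data[0] is the
 * PC, data[1] the packed IT state (cleared and unused for AArch64), and
 * data[2] the (shifted) syndrome word for the faulting insn.
 */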